Index: clang/lib/CodeGen/CGBuiltin.cpp =================================================================== --- clang/lib/CodeGen/CGBuiltin.cpp +++ clang/lib/CodeGen/CGBuiltin.cpp @@ -3910,8 +3910,9 @@ Builder.CreateStore(FrameAddr, Buf); // Store the stack pointer to the setjmp buffer. - Value *StackAddr = - Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave)); + Value *StackAddr = Builder.CreateStackSave(); + assert(Buf.getPointer()->getType() == StackAddr->getType()); + Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2); Builder.CreateStore(StackAddr, StackSaveSlot); Index: clang/lib/CodeGen/CGCall.cpp =================================================================== --- clang/lib/CodeGen/CGCall.cpp +++ clang/lib/CodeGen/CGCall.cpp @@ -4277,15 +4277,13 @@ assert(!StackBase); // Save the stack. - llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); - StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save"); + StackBase = CGF.Builder.CreateStackSave("inalloca.save"); } void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { if (StackBase) { // Restore the stack after the call. - llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); - CGF.Builder.CreateCall(F, StackBase); + CGF.Builder.CreateStackRestore(StackBase); } } Index: clang/lib/CodeGen/CGDecl.cpp =================================================================== --- clang/lib/CodeGen/CGDecl.cpp +++ clang/lib/CodeGen/CGDecl.cpp @@ -579,8 +579,7 @@ bool isRedundantBeforeReturn() override { return true; } void Emit(CodeGenFunction &CGF, Flags flags) override { llvm::Value *V = CGF.Builder.CreateLoad(Stack); - llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); - CGF.Builder.CreateCall(F, V); + CGF.Builder.CreateStackRestore(V); } }; @@ -1629,10 +1628,10 @@ if (!DidCallStackSave) { // Save the stack. 
Address Stack = - CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack"); + CreateDefaultAlignTempAlloca(AllocaInt8PtrTy, "saved_stack"); - llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave); - llvm::Value *V = Builder.CreateCall(F); + llvm::Value *V = Builder.CreateStackSave(); + assert(V->getType() == AllocaInt8PtrTy); Builder.CreateStore(V, Stack); DidCallStackSave = true; Index: clang/test/CodeGen/2006-01-13-StackSave.c =================================================================== --- clang/test/CodeGen/2006-01-13-StackSave.c +++ clang/test/CodeGen/2006-01-13-StackSave.c @@ -1,6 +1,6 @@ // PR691 // RUN: %clang_cc1 %s -emit-llvm -o - | FileCheck %s -// CHECK: call ptr @llvm.stacksave() +// CHECK: call ptr @llvm.stacksave.p0() extern void external(int[*]); Index: clang/test/CodeGen/vla.c =================================================================== --- clang/test/CodeGen/vla.c +++ clang/test/CodeGen/vla.c @@ -40,16 +40,16 @@ // rdar://8403108 // CHECK-LABEL: define{{.*}} void @f_8403108 void f_8403108(unsigned x) { - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() char s1[x]; while (1) { - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() char s2[x]; if (1) break; - // CHECK: call void @llvm.stackrestore(ptr + // CHECK: call void @llvm.stackrestore.p0(ptr } - // CHECK: call void @llvm.stackrestore(ptr + // CHECK: call void @llvm.stackrestore.p0(ptr } // pr7827 Index: clang/test/CodeGenCXX/builtin-is-constant-evaluated.cpp =================================================================== --- clang/test/CodeGenCXX/builtin-is-constant-evaluated.cpp +++ clang/test/CodeGenCXX/builtin-is-constant-evaluated.cpp @@ -78,7 +78,7 @@ // CHECK-ARR: %x2 = alloca [42 x i8], char x2[std::is_constant_evaluated() && __builtin_is_constant_evaluated() ? 
42 : RANDU()]; - // CHECK-ARR: call ptr @llvm.stacksave() + // CHECK-ARR: call ptr @llvm.stacksave.p0() // CHECK-ARR: %vla = alloca i8, i64 13, char x3[std::is_constant_evaluated() || __builtin_is_constant_evaluated() ? RANDU() : 13]; } Index: clang/test/CodeGenCXX/c99-variable-length-array.cpp =================================================================== --- clang/test/CodeGenCXX/c99-variable-length-array.cpp +++ clang/test/CodeGenCXX/c99-variable-length-array.cpp @@ -13,7 +13,7 @@ void f(int argc, const char* argv[]) { // CHECK: call void @_ZN1XC1Ev X x; - // CHECK: call ptr @llvm.stacksave( + // CHECK: call ptr @llvm.stacksave.p0( const char *argv2[argc]; // CHECK: call void @_ZN1YC1Ev Y y; @@ -21,7 +21,7 @@ argv2[i] = argv[i]; // CHECK: call void @_ZN1YD1Ev - // CHECK: call void @llvm.stackrestore + // CHECK: call void @llvm.stackrestore.p0 // CHECK: call void @_ZN1XD1Ev // CHECK: ret void } Index: clang/test/CodeGenCXX/inalloca-overaligned.cpp =================================================================== --- clang/test/CodeGenCXX/inalloca-overaligned.cpp +++ clang/test/CodeGenCXX/inalloca-overaligned.cpp @@ -37,7 +37,7 @@ // CHECK-LABEL: define dso_local noundef i32 @"?pass_inalloca_overaligned@@Y{{.*}}" // CHECK: [[TMP:%[^ ]*]] = alloca %struct.OverAligned, align 64 -// CHECK: call ptr @llvm.stacksave() +// CHECK: call ptr @llvm.stacksave.p0() // CHECK: alloca inalloca <{ %struct.NonTrivial, ptr }> // Construct OverAligned into TMP. 
Index: clang/test/CodeGenCXX/inalloca-stmtexpr.cpp =================================================================== --- clang/test/CodeGenCXX/inalloca-stmtexpr.cpp +++ clang/test/CodeGenCXX/inalloca-stmtexpr.cpp @@ -47,5 +47,5 @@ // CHECK: br i1 // CHECK: br label %out // CHECK: call void @"?inalloca@@YAXUFoo@@0@Z"(ptr inalloca(<{ %struct.Foo, %struct.Foo }>) %{{.*}}) -// CHECK: call void @llvm.stackrestore(ptr %inalloca.save) +// CHECK: call void @llvm.stackrestore.p0(ptr %inalloca.save) // CHECK: out: Index: clang/test/CodeGenCXX/inheriting-constructor.cpp =================================================================== --- clang/test/CodeGenCXX/inheriting-constructor.cpp +++ clang/test/CodeGenCXX/inheriting-constructor.cpp @@ -140,7 +140,7 @@ // On Win32, the inalloca call can't be forwarded so we force inlining. // WIN32: %[[TMP:.*]] = alloca - // WIN32: call ptr @llvm.stacksave() + // WIN32: call ptr @llvm.stacksave.p0() // WIN32: %[[ARGMEM:.*]] = alloca inalloca // WIN32: call {{.*}} @"??0Q@@QAE@H@Z"(ptr {{[^,]*}} %[[TMP]], i32 4) // WIN32: %[[ARG3:.*]] = getelementptr {{.*}} %[[ARGMEM]] @@ -153,7 +153,7 @@ // WIN32: %[[ARG4:.*]] = getelementptr {{.*}} %[[ARGMEM]] // WIN32: store ptr %[[TMP]], ptr %[[ARG4]] // WIN32: call {{.*}} @"??0A@inalloca_nonvirt@@QAE@UQ@@H0$$QAU2@@Z"(ptr{{[^,]*}}, ptr inalloca(<{{.*}}>) %[[ARGMEM]]) - // WIN32: call void @llvm.stackrestore( + // WIN32: call void @llvm.stackrestore.p0( // WIN32: call {{.*}} @"??0Z@@QAE@XZ"( // WIN32: call {{.*}} @"??1Q@@QAE@XZ"( @@ -176,7 +176,7 @@ // On Win32, the inalloca call can't be forwarded so we force inlining. 
// WIN32: %[[TMP:.*]] = alloca - // WIN32: call ptr @llvm.stacksave() + // WIN32: call ptr @llvm.stacksave.p0() // WIN32: %[[ARGMEM:.*]] = alloca inalloca // WIN32: call {{.*}} @"??0Q@@QAE@H@Z"(ptr {{[^,]*}} %[[TMP]], i32 4) // WIN32: %[[ARG3:.*]] = getelementptr {{.*}} %[[ARGMEM]] @@ -189,7 +189,7 @@ // WIN32: %[[ARG4:.*]] = getelementptr {{.*}} %[[ARGMEM]] // WIN32: store ptr %[[TMP]], ptr %[[ARG4]] // WIN32: call {{.*}} @"??0A@inalloca_nonvirt@@QAE@UQ@@H0$$QAU2@@Z"(ptr{{[^,]*}}, ptr inalloca(<{{.*}}>) %[[ARGMEM]]) - // WIN32: call void @llvm.stackrestore( + // WIN32: call void @llvm.stackrestore.p0( // WIN32: call {{.*}} @"??0Z@@QAE@XZ"( // WIN32: call {{.*}} @"??1Q@@QAE@XZ"( @@ -216,7 +216,7 @@ // On Win32, the inalloca call can't be forwarded so we force inlining. // WIN32: %[[TMP:.*]] = alloca - // WIN32: call ptr @llvm.stacksave() + // WIN32: call ptr @llvm.stacksave.p0() // WIN32: %[[ARGMEM:.*]] = alloca inalloca // WIN32: call {{.*}} @"??0Q@@QAE@H@Z"(ptr {{[^,]*}} %[[TMP]], i32 4) // WIN32: %[[ARG3:.*]] = getelementptr {{.*}} %[[ARGMEM]] @@ -235,7 +235,7 @@ // WIN32: %[[ARG4:.*]] = getelementptr {{.*}} %[[ARGMEM]] // WIN32: store ptr %[[TMP]], ptr %[[ARG4]] // WIN32: call {{.*}} @"??0A@inalloca_virt@@QAE@UQ@@H0$$QAU2@@Z"(ptr{{[^,]*}}, ptr inalloca(<{{.*}}>) %[[ARGMEM]]) - // WIN32: call void @llvm.stackrestore( + // WIN32: call void @llvm.stackrestore.p0( // WIN32: br // // Note that if we jumped directly to here we would fail to stackrestore and @@ -267,7 +267,7 @@ // On Win32, the inalloca call can't be forwarded so we force inlining. 
// WIN32: %[[TMP:.*]] = alloca - // WIN32: call ptr @llvm.stacksave() + // WIN32: call ptr @llvm.stacksave.p0() // WIN32: %[[ARGMEM:.*]] = alloca inalloca // WIN32: call {{.*}} @"??0Q@@QAE@H@Z"(ptr {{[^,]*}} %[[TMP]], i32 4) // WIN32: %[[ARG3:.*]] = getelementptr {{.*}} %[[ARGMEM]] @@ -285,7 +285,7 @@ // WIN32: %[[ARG4:.*]] = getelementptr {{.*}} %[[ARGMEM]] // WIN32: store ptr %[[TMP]], ptr %[[ARG4]] // WIN32: call {{.*}} @"??0A@inalloca_virt@@QAE@UQ@@H0$$QAU2@@Z"(ptr{{[^,]*}}, ptr inalloca(<{{.*}}>) %[[ARGMEM]]) - // WIN32: call void @llvm.stackrestore( + // WIN32: call void @llvm.stackrestore.p0( // WIN32: br // // WIN32: call {{.*}} @"??0Z@@QAE@XZ"( Index: clang/test/CodeGenCXX/microsoft-abi-arg-order.cpp =================================================================== --- clang/test/CodeGenCXX/microsoft-abi-arg-order.cpp +++ clang/test/CodeGenCXX/microsoft-abi-arg-order.cpp @@ -39,7 +39,7 @@ // things as we unwind. // // X86-LABEL: define dso_local void @"?call_foo@@YAXXZ"() -// X86: call ptr @llvm.stacksave() +// X86: call ptr @llvm.stacksave.p0() // X86: %[[argmem:[^ ]*]] = alloca inalloca [[argmem_ty]] // X86: %[[arg3:[^ ]*]] = getelementptr inbounds [[argmem_ty]], ptr %[[argmem]], i32 0, i32 2 // X86: call x86_thiscallcc noundef ptr @"??0A@@QAE@H@Z"(ptr {{[^,]*}} %[[arg3]], i32 noundef 3) @@ -48,7 +48,7 @@ // X86: %[[arg1:[^ ]*]] = getelementptr inbounds [[argmem_ty]], ptr %[[argmem]], i32 0, i32 0 // X86: invoke x86_thiscallcc noundef ptr @"??0A@@QAE@H@Z"(ptr {{[^,]*}} %[[arg1]], i32 noundef 1) // X86: call void @"?foo@@YAXUA@@00@Z"(ptr inalloca([[argmem_ty]]) %[[argmem]]) -// X86: call void @llvm.stackrestore +// X86: call void @llvm.stackrestore.p0 // X86: ret void // // lpad2: Index: clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp =================================================================== --- clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp +++ clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp @@ -16,7 +16,7 @@ // With exceptions, 
we need to clean up at least one of these temporaries. // WIN32-LABEL: define dso_local void @"?HasEHCleanup@@YAXXZ"() {{.*}} { -// WIN32: %[[base:.*]] = call ptr @llvm.stacksave() +// WIN32: %[[base:.*]] = call ptr @llvm.stacksave.p0() // If this call throws, we have to restore the stack. // WIN32: call void @"?getA@@YA?AUA@@XZ"(ptr sret(%struct.A) align 4 %{{.*}}) // If this call throws, we have to cleanup the first temporary. @@ -41,7 +41,7 @@ // With exceptions, we need to clean up at least one of these temporaries. // WIN32-LABEL: define dso_local void @"?HasEHCleanupNoexcept@@YAXXZ"() {{.*}} { -// WIN32: %[[base:.*]] = call ptr @llvm.stacksave() +// WIN32: %[[base:.*]] = call ptr @llvm.stacksave.p0() // WIN32: invoke void @"?getA@@YA?AUA@@XZ"(ptr sret(%struct.A) align 4 %{{.*}}) // WIN32: invoke void @"?getA@@YA?AUA@@XZ"(ptr sret(%struct.A) align 4 %{{.*}}) // WIN32: invoke noundef i32 @"?TakesTwo@@YAHUA@@0@Z" @@ -61,7 +61,7 @@ // WIN32-LABEL: define dso_local noundef i32 @"?HasDeactivatedCleanups@@YAHXZ"() {{.*}} { // WIN32: %[[isactive:.*]] = alloca i1 -// WIN32: call ptr @llvm.stacksave() +// WIN32: call ptr @llvm.stacksave.p0() // WIN32: %[[argmem:.*]] = alloca inalloca [[argmem_ty:<{ %struct.A, %struct.A }>]] // WIN32: %[[arg1:.*]] = getelementptr inbounds [[argmem_ty]], ptr %[[argmem]], i32 0, i32 1 // WIN32: call x86_thiscallcc noundef ptr @"??0A@@QAE@XZ" @@ -97,7 +97,7 @@ // WIN32-LABEL: define dso_local noundef i32 @"?HasConditionalCleanup@@YAH_N@Z"(i1 noundef zeroext %{{.*}}) {{.*}} { // WIN32: store i1 false // WIN32: br i1 -// WIN32: call ptr @llvm.stacksave() +// WIN32: call ptr @llvm.stacksave.p0() // WIN32: call x86_thiscallcc noundef ptr @"??0A@@QAE@XZ"(ptr {{[^,]*}} %{{.*}}) // WIN32: store i1 true // WIN32: invoke x86_thiscallcc noundef ptr @"??0A@@QAE@XZ"(ptr {{[^,]*}} %{{.*}}) Index: clang/test/CodeGenCXX/vla-consruct.cpp =================================================================== --- clang/test/CodeGenCXX/vla-consruct.cpp +++ 
clang/test/CodeGenCXX/vla-consruct.cpp @@ -35,7 +35,7 @@ // CHECK-NEXT: [[t2:%.+]] = load i32, ptr [[n_addr]] // CHECK-NEXT: [[add:%.+]] = add nsw i32 [[t2]], 1 // CHECK-NEXT: [[t3:%.+]] = zext i32 [[add]] to i64 - // CHECK-NEXT: [[t4:%.+]] = call ptr @llvm.stacksave() + // CHECK-NEXT: [[t4:%.+]] = call ptr @llvm.stacksave.p0() // CHECK-NEXT: store ptr [[t4]], ptr [[saved_stack]] // CHECK-NEXT: [[t5:%.+]] = mul nuw i64 [[t1]], [[t3]] // CHECK-NEXT: [[vla:%.+]] = alloca [[struct_S]], i64 [[t5]] @@ -97,7 +97,7 @@ // CHECK: [[arraydestroy_done2]] // CHECK-NEXT: [[t17:%.+]] = load ptr, ptr [[saved_stack]] - // CHECK-NEXT: call void @llvm.stackrestore(ptr [[t17]]) + // CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[t17]]) // CHECK: ret void // CHECK: [[lpad]] Index: clang/test/CodeGenCXX/vla-lambda-capturing.cpp =================================================================== --- clang/test/CodeGenCXX/vla-lambda-capturing.cpp +++ clang/test/CodeGenCXX/vla-lambda-capturing.cpp @@ -37,9 +37,9 @@ // CHECK: [[N:%.+]] = load [[INTPTR_T]], ptr [[N_ADDR]] // CHECK: [[BUFFER_ADDR:%.+]] = getelementptr inbounds [[CAP_TYPE1]], ptr [[THIS]], i{{.+}} 0, i{{.+}} 1 // CHECK: [[BUFFER:%.+]] = load ptr, ptr [[BUFFER_ADDR]] -// CHECK: call ptr @llvm.stacksave() +// CHECK: call ptr @llvm.stacksave.p0() // CHECK: alloca [[INTPTR_T]], [[INTPTR_T]] [[N]] -// CHECK: call void @llvm.stackrestore( +// CHECK: call void @llvm.stackrestore.p0( // CHECK: ret void template @@ -82,18 +82,18 @@ // CHECK: define linkonce_odr {{.*}}void [[F_INT]]([[INTPTR_T]] // CHECK: [[SIZE:%.+]] = add -// CHECK: call ptr @llvm.stacksave() +// CHECK: call ptr @llvm.stacksave.p0() // CHECK: [[BUFFER_ADDR:%.+]] = alloca [[INTPTR_T]], [[INTPTR_T]] [[SIZE]] // CHECK: [[CAP_SIZE_REF:%.+]] = getelementptr inbounds [[CAP_TYPE2]], ptr [[CAP_ARG:%.+]], i{{.+}} 0, i{{.+}} 0 // CHECK: store [[INTPTR_T]] [[SIZE]], ptr [[CAP_SIZE_REF]] // CHECK: [[CAP_BUFFER_ADDR_REF:%.+]] = getelementptr inbounds [[CAP_TYPE2]], ptr 
[[CAP_ARG]], i{{.+}} 0, i{{.+}} 1 // CHECK: store ptr [[BUFFER_ADDR]], ptr [[CAP_BUFFER_ADDR_REF]] // CHECK: call{{.*}} void [[F_INT_LAMBDA:@.+]](ptr {{[^,]*}} [[CAP_ARG]]) -// CHECK: call void @llvm.stackrestore( +// CHECK: call void @llvm.stackrestore.p0( // CHECK: ret void // CHECK: void [[B_INT]]([[INTPTR_T]] // CHECK: [[SIZE1:%.+]] = call {{.*}}[[INTPTR_T]] -// CHECK: call ptr @llvm.stacksave() +// CHECK: call ptr @llvm.stacksave.p0() // CHECK: [[BUFFER2_ADDR:%.+]] = alloca [[INTPTR_T]], [[INTPTR_T]] [[SIZE1]] // CHECK: [[SIZE2:%.+]] = add // CHECK: [[BUFFER1_ADDR:%.+]] = alloca [[INTPTR_T]], [[INTPTR_T]] @@ -108,16 +108,16 @@ // CHECK: [[CAP_BUFFER2_ADDR_REF:%.+]] = getelementptr inbounds [[CAP_TYPE3]], ptr [[CAP_ARG]], i{{.+}} 0, i{{.+}} 4 // CHECK: store ptr [[BUFFER2_ADDR]], ptr [[CAP_BUFFER2_ADDR_REF]] // CHECK: call{{.*}} void [[B_INT_LAMBDA:@.+]](ptr {{[^,]*}} [[CAP_ARG]]) -// CHECK: call void @llvm.stackrestore( +// CHECK: call void @llvm.stackrestore.p0( // CHECK: ret void // CHECK: define linkonce_odr{{.*}} void [[F_INT_LAMBDA]](ptr // CHECK: [[THIS:%.+]] = load ptr, ptr // CHECK: [[SIZE_REF:%.+]] = getelementptr inbounds [[CAP_TYPE2]], ptr [[THIS]], i{{.+}} 0, i{{.+}} 0 // CHECK: [[SIZE:%.+]] = load [[INTPTR_T]], ptr [[SIZE_REF]] -// CHECK: call ptr @llvm.stacksave() +// CHECK: call ptr @llvm.stacksave.p0() // CHECK: alloca [[INTPTR_T]], [[INTPTR_T]] [[SIZE]] -// CHECK: call void @llvm.stackrestore( +// CHECK: call void @llvm.stackrestore.p0( // CHECK: ret void // CHECK: define linkonce_odr{{.*}} void [[B_INT_LAMBDA]](ptr Index: clang/test/CodeGenObjC/arc.m =================================================================== --- clang/test/CodeGenObjC/arc.m +++ clang/test/CodeGenObjC/arc.m @@ -483,7 +483,7 @@ // CHECK-NEXT: [[DIM:%.*]] = zext i32 [[T0]] to i64 // Save the stack pointer. 
- // CHECK-NEXT: [[T0:%.*]] = call ptr @llvm.stacksave() + // CHECK-NEXT: [[T0:%.*]] = call ptr @llvm.stacksave.p0() // CHECK-NEXT: store ptr [[T0]], ptr [[SAVED_STACK]] // Allocate the VLA. @@ -509,7 +509,7 @@ // CHECK-NEXT: br i1 [[EQ]], // CHECK: [[T0:%.*]] = load ptr, ptr [[SAVED_STACK]] - // CHECK-NEXT: call void @llvm.stackrestore(ptr [[T0]]) + // CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[T0]]) // CHECK-NEXT: ret void } @@ -526,7 +526,7 @@ // CHECK-NEXT: [[T0:%.*]] = load i32, ptr [[N]], align 4 // CHECK-NEXT: [[DIM:%.*]] = zext i32 [[T0]] to i64 - // CHECK-NEXT: [[T0:%.*]] = call ptr @llvm.stacksave() + // CHECK-NEXT: [[T0:%.*]] = call ptr @llvm.stacksave.p0() // CHECK-NEXT: store ptr [[T0]], ptr [[SAVED_STACK]] @@ -558,7 +558,7 @@ // CHECK-NEXT: br i1 [[EQ]], // CHECK: [[T0:%.*]] = load ptr, ptr [[SAVED_STACK]] - // CHECK-NEXT: call void @llvm.stackrestore(ptr [[T0]]) + // CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[T0]]) // CHECK-NEXT: ret void } Index: clang/test/OpenMP/amdgcn_target_device_vla.cpp =================================================================== --- clang/test/OpenMP/amdgcn_target_device_vla.cpp +++ clang/test/OpenMP/amdgcn_target_device_vla.cpp @@ -384,7 +384,7 @@ // CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4, addrspace(5) // CHECK-NEXT: [[I4:%.*]] = alloca i32, align 4, addrspace(5) // CHECK-NEXT: [[N:%.*]] = alloca i32, align 4, addrspace(5) -// CHECK-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 8, addrspace(5) +// CHECK-NEXT: [[SAVED_STACK:%.*]] = alloca ptr addrspace(5), align 4, addrspace(5) // CHECK-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8, addrspace(5) // CHECK-NEXT: [[J:%.*]] = alloca i32, align 4, addrspace(5) // CHECK-NEXT: [[J11:%.*]] = alloca i32, align 4, addrspace(5) @@ -462,8 +462,8 @@ // CHECK-NEXT: store i32 10, ptr [[N_ASCAST]], align 4 // CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[N_ASCAST]], align 4 // CHECK-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64 -// CHECK-NEXT: 
[[TMP16:%.*]] = call ptr @llvm.stacksave() -// CHECK-NEXT: store ptr [[TMP16]], ptr [[SAVED_STACK_ASCAST]], align 8 +// CHECK-NEXT: [[TMP16:%.*]] = call ptr addrspace(5) @llvm.stacksave.p5() +// CHECK-NEXT: store ptr addrspace(5) [[TMP16]], ptr [[SAVED_STACK_ASCAST]], align 4 // CHECK-NEXT: [[VLA7:%.*]] = alloca i32, i64 [[TMP15]], align 4, addrspace(5) // CHECK-NEXT: [[VLA7_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VLA7]] to ptr // CHECK-NEXT: store i64 [[TMP15]], ptr [[__VLA_EXPR0_ASCAST]], align 8 @@ -517,8 +517,8 @@ // CHECK-NEXT: store i32 [[INC21]], ptr [[J11_ASCAST]], align 4 // CHECK-NEXT: br label [[FOR_COND12]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK: for.end22: -// CHECK-NEXT: [[TMP31:%.*]] = load ptr, ptr [[SAVED_STACK_ASCAST]], align 8 -// CHECK-NEXT: call void @llvm.stackrestore(ptr [[TMP31]]) +// CHECK-NEXT: [[TMP31:%.*]] = load ptr addrspace(5), ptr [[SAVED_STACK_ASCAST]], align 4 +// CHECK-NEXT: call void @llvm.stackrestore.p5(ptr addrspace(5) [[TMP31]]) // CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK: omp.body.continue: // CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] Index: clang/test/OpenMP/debug-info-openmp-array.cpp =================================================================== --- clang/test/OpenMP/debug-info-openmp-array.cpp +++ clang/test/OpenMP/debug-info-openmp-array.cpp @@ -25,7 +25,7 @@ // CHECK1-NEXT: call void @llvm.dbg.declare(metadata ptr [[I]], metadata [[META14:![0-9]+]], metadata !DIExpression()), !dbg [[DBG15:![0-9]+]] // CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[M_ADDR]], align 4, !dbg [[DBG16:![0-9]+]] // CHECK1-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64, !dbg [[DBG17:![0-9]+]] -// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave(), !dbg [[DBG17]] +// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0(), !dbg [[DBG17]] // CHECK1-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8, !dbg [[DBG17]] // CHECK1-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16, !dbg [[DBG17]] // 
CHECK1-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8, !dbg [[DBG17]] @@ -33,7 +33,7 @@ // CHECK1-NEXT: call void @llvm.dbg.declare(metadata ptr [[VLA]], metadata [[META21:![0-9]+]], metadata !DIExpression()), !dbg [[DBG25:![0-9]+]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB4:[0-9]+]], i32 3, ptr @_Z1fi.omp_outlined, ptr [[M_ADDR]], i64 [[TMP1]], ptr [[VLA]]), !dbg [[DBG26:![0-9]+]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8, !dbg [[DBG27:![0-9]+]] -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP3]]), !dbg [[DBG27]] +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP3]]), !dbg [[DBG27]] // CHECK1-NEXT: ret void, !dbg [[DBG27]] // // Index: clang/test/OpenMP/distribute_parallel_for_reduction_task_codegen.cpp =================================================================== --- clang/test/OpenMP/distribute_parallel_for_reduction_task_codegen.cpp +++ clang/test/OpenMP/distribute_parallel_for_reduction_task_codegen.cpp @@ -191,7 +191,7 @@ // CHECK1-NEXT: [[TMP12:%.*]] = sdiv exact i64 [[TMP11]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP13:%.*]] = add nuw i64 [[TMP12]], 1 // CHECK1-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP15:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP15:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP15]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP13]], align 16 // CHECK1-NEXT: store i64 [[TMP13]], ptr [[__VLA_EXPR0]], align 8 @@ -408,7 +408,7 @@ // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: [[TMP105:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP105]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP105]]) // CHECK1-NEXT: ret void // // @@ -536,21 +536,21 
@@ // CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) // CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) // CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias [[META12:![0-9]+]] -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias [[META12]] -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias [[META12]] -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias [[META12]] -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias [[META12]] -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias [[META12]] -// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias [[META12]] -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias [[META12]] -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias [[META12]] +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 // CHECK1-NEXT: 
call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias [[META12]] +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 // CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 8 // CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias [[META12]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 // CHECK1-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP15]], ptr [[TMP14]]) // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 // CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8 @@ -570,7 +570,7 @@ // CHECK1-NEXT: [[TMP30:%.*]] = sub i64 [[TMP28]], [[TMP29]] // CHECK1-NEXT: [[TMP31:%.*]] = add nuw i64 [[TMP30]], 1 // CHECK1-NEXT: [[TMP32:%.*]] = mul nuw i64 [[TMP31]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: store i64 [[TMP31]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias [[META12]] +// CHECK1-NEXT: store i64 [[TMP31]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !12 // CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[TMP12]], align 8 // CHECK1-NEXT: [[TMP34:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP33]], ptr [[TMP20]]) // CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 @@ -580,8 +580,8 @@ // CHECK1-NEXT: [[TMP39:%.*]] = ptrtoint ptr [[TMP20]] to i64 // CHECK1-NEXT: [[TMP40:%.*]] = sub i64 [[TMP38]], [[TMP39]] // CHECK1-NEXT: [[TMP41:%.*]] = getelementptr i8, ptr [[TMP34]], i64 [[TMP40]] -// CHECK1-NEXT: store ptr [[TMP4_I]], ptr 
[[TMP_I]], align 8, !noalias [[META12]] -// CHECK1-NEXT: store ptr [[TMP41]], ptr [[TMP4_I]], align 8, !noalias [[META12]] +// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !12 +// CHECK1-NEXT: store ptr [[TMP41]], ptr [[TMP4_I]], align 8, !noalias !12 // CHECK1-NEXT: ret i32 0 // // Index: clang/test/OpenMP/for_reduction_codegen.cpp =================================================================== --- clang/test/OpenMP/for_reduction_codegen.cpp +++ clang/test/OpenMP/for_reduction_codegen.cpp @@ -594,7 +594,7 @@ // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC]], i64 0, i64 1 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 10, [[TMP2]] // CHECK1-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP4]], align 16 @@ -618,7 +618,7 @@ // CHECK1-NEXT: [[CALL10:%.*]] = call noundef i32 @_Z5tmainIiLi42EET_v() // CHECK1-NEXT: store i32 [[CALL10]], ptr [[RETVAL]], align 4 // CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP9]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP9]]) // CHECK1-NEXT: [[ARRAY_BEGIN11:%.*]] = getelementptr inbounds [5 x %struct.S], ptr [[VVAR2]], i32 0, i32 0 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN11]], i64 5 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] @@ -1036,7 +1036,7 @@ // CHECK1-NEXT: [[TMP12:%.*]] = sdiv exact i64 [[TMP11]], ptrtoint (ptr getelementptr (i32, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP13:%.*]] = add nuw i64 [[TMP12]], 1 // CHECK1-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], ptrtoint (ptr getelementptr (i32, ptr null, i32 1) to i64) -// CHECK1-NEXT: 
[[TMP15:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP15:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP15]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA7:%.*]] = alloca i32, i64 [[TMP13]], align 16 // CHECK1-NEXT: store i64 [[TMP13]], ptr [[__VLA_EXPR0]], align 8 @@ -1231,7 +1231,7 @@ // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE56]], label [[ARRAYDESTROY_BODY]] // CHECK1: arraydestroy.done56: // CHECK1-NEXT: [[TMP70:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP70]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP70]]) // CHECK1-NEXT: ret void // // @@ -1326,7 +1326,7 @@ // CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP0]], [[TMP1]] // CHECK1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 // CHECK1-NEXT: [[TMP6:%.*]] = udiv exact i64 [[TMP5]], ptrtoint (ptr getelementptr (i32, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP7:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP7:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP7]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA3:%.*]] = alloca i32, i64 [[TMP6]], align 16 // CHECK1-NEXT: store i64 [[TMP6]], ptr [[__VLA_EXPR0]], align 8 @@ -1492,7 +1492,7 @@ // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE45:%.*]], label [[ARRAYDESTROY_BODY]] // CHECK1: arraydestroy.done45: // CHECK1-NEXT: [[TMP42:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP42]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP42]]) // CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[TMP43]], align 4 // CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4]], i32 [[TMP44]]) @@ -1771,7 +1771,7 @@ // CHECK1-NEXT: [[TMP8:%.*]] = sdiv exact i64 [[TMP7]], ptrtoint (ptr getelementptr ([[STRUCT_S]], ptr null, i32 1) to i64) // CHECK1-NEXT: 
[[TMP9:%.*]] = add nuw i64 [[TMP8]], 1 // CHECK1-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], ptrtoint (ptr getelementptr ([[STRUCT_S]], ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP11:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP11:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP11]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca [[STRUCT_S]], i64 [[TMP9]], align 16 // CHECK1-NEXT: store i64 [[TMP9]], ptr [[__VLA_EXPR0]], align 8 @@ -1897,7 +1897,7 @@ // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY]] // CHECK1: arraydestroy.done21: // CHECK1-NEXT: [[TMP42:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP42]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP42]]) // CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[TMP43]], align 4 // CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4]], i32 [[TMP44]]) @@ -3022,7 +3022,7 @@ // CHECK1-NEXT: [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (ptr getelementptr ([[STRUCT_S:%.*]], ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP8:%.*]] = add nuw i64 [[TMP7]], 1 // CHECK1-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], ptrtoint (ptr getelementptr ([[STRUCT_S]], ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP10:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP10:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP10]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca [[STRUCT_S]], i64 [[TMP8]], align 16 // CHECK1-NEXT: store i64 [[TMP8]], ptr [[__VLA_EXPR0]], align 8 @@ -3146,7 +3146,7 @@ // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_BODY]] // CHECK1: arraydestroy.done20: // CHECK1-NEXT: [[TMP40:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr 
[[TMP40]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP40]]) // CHECK1-NEXT: [[TMP41:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[TMP41]], align 4 // CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4]], i32 [[TMP42]]) Index: clang/test/OpenMP/for_reduction_codegen_UDR.cpp =================================================================== --- clang/test/OpenMP/for_reduction_codegen_UDR.cpp +++ clang/test/OpenMP/for_reduction_codegen_UDR.cpp @@ -660,7 +660,7 @@ // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC]], i64 0, i64 1 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 10, [[TMP2]] // CHECK1-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP4]], align 16 @@ -676,7 +676,7 @@ // CHECK1-NEXT: [[CALL10:%.*]] = call noundef i32 @_Z5tmainIiLi42EET_v() // CHECK1-NEXT: store i32 [[CALL10]], ptr [[RETVAL]], align 4 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP7]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP7]]) // CHECK1-NEXT: [[ARRAY_BEGIN11:%.*]] = getelementptr inbounds [5 x %struct.S.0], ptr [[VVAR2]], i32 0, i32 0 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN11]], i64 5 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] @@ -1089,7 +1089,7 @@ // CHECK1-NEXT: [[TMP12:%.*]] = sdiv exact i64 [[TMP11]], ptrtoint (ptr getelementptr (i32, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP13:%.*]] = add nuw i64 [[TMP12]], 1 // CHECK1-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], ptrtoint (ptr getelementptr (i32, ptr null, i32 1) to i64) -// CHECK1-NEXT: 
[[TMP15:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP15:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP15]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA7:%.*]] = alloca i32, i64 [[TMP13]], align 16 // CHECK1-NEXT: store i64 [[TMP13]], ptr [[__VLA_EXPR0]], align 8 @@ -1292,7 +1292,7 @@ // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE63]], label [[ARRAYDESTROY_BODY]] // CHECK1: arraydestroy.done63: // CHECK1-NEXT: [[TMP68:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP68]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP68]]) // CHECK1-NEXT: ret void // // @@ -1413,7 +1413,7 @@ // CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP0]], [[TMP1]] // CHECK1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 // CHECK1-NEXT: [[TMP6:%.*]] = udiv exact i64 [[TMP5]], ptrtoint (ptr getelementptr (i32, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP7:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP7:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP7]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA3:%.*]] = alloca i32, i64 [[TMP6]], align 16 // CHECK1-NEXT: store i64 [[TMP6]], ptr [[__VLA_EXPR0]], align 8 @@ -1587,7 +1587,7 @@ // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE52:%.*]], label [[ARRAYDESTROY_BODY]] // CHECK1: arraydestroy.done52: // CHECK1-NEXT: [[TMP40:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP40]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP40]]) // CHECK1-NEXT: [[TMP41:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[TMP41]], align 4 // CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4]], i32 [[TMP42]]) @@ -1683,7 +1683,7 @@ // CHECK1-NEXT: [[TMP8:%.*]] = sdiv exact i64 [[TMP7]], ptrtoint (ptr getelementptr ([[STRUCT_S_0]], ptr null, i32 1) to i64) // CHECK1-NEXT: 
[[TMP9:%.*]] = add nuw i64 [[TMP8]], 1 // CHECK1-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], ptrtoint (ptr getelementptr ([[STRUCT_S_0]], ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP11:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP11:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP11]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca [[STRUCT_S_0]], i64 [[TMP9]], align 16 // CHECK1-NEXT: store i64 [[TMP9]], ptr [[__VLA_EXPR0]], align 8 @@ -1815,7 +1815,7 @@ // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE27]], label [[ARRAYDESTROY_BODY]] // CHECK1: arraydestroy.done27: // CHECK1-NEXT: [[TMP42:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP42]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP42]]) // CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[TMP43]], align 4 // CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4]], i32 [[TMP44]]) @@ -3383,7 +3383,7 @@ // CHECK3-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC]], i64 0, i64 1 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX14]], align 4 // CHECK3-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK3-NEXT: [[TMP10:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP10:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP10]], ptr [[SAVED_STACK]], align 8 // CHECK3-NEXT: [[TMP11:%.*]] = mul nuw i64 10, [[TMP9]] // CHECK3-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP11]], align 16 @@ -3499,7 +3499,7 @@ // CHECK3-NEXT: [[CALL69:%.*]] = call noundef i32 @_Z5tmainIiLi42EET_v() // CHECK3-NEXT: store i32 [[CALL69]], ptr [[RETVAL]], align 4 // CHECK3-NEXT: [[TMP34:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP34]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP34]]) // CHECK3-NEXT: 
[[ARRAY_BEGIN70:%.*]] = getelementptr inbounds [5 x %struct.S.0], ptr [[VVAR2]], i32 0, i32 0 // CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN70]], i64 5 // CHECK3-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] Index: clang/test/OpenMP/for_reduction_task_codegen.cpp =================================================================== --- clang/test/OpenMP/for_reduction_task_codegen.cpp +++ clang/test/OpenMP/for_reduction_task_codegen.cpp @@ -84,7 +84,7 @@ // CHECK1-NEXT: [[TMP11:%.*]] = sdiv exact i64 [[TMP10]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP12:%.*]] = add nuw i64 [[TMP11]], 1 // CHECK1-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP14:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP14:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP14]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP12]], align 16 // CHECK1-NEXT: store i64 [[TMP12]], ptr [[__VLA_EXPR0]], align 8 @@ -302,7 +302,7 @@ // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: [[TMP104:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP104]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP104]]) // CHECK1-NEXT: [[TMP105:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: [[TMP106:%.*]] = load i32, ptr [[TMP105]], align 4 // CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4:[0-9]+]], i32 [[TMP106]]) Index: clang/test/OpenMP/for_scan_codegen.cpp =================================================================== --- clang/test/OpenMP/for_scan_codegen.cpp +++ clang/test/OpenMP/for_scan_codegen.cpp @@ -17,7 +17,7 @@ void baz(int n) { static float a[10]; static double b; - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: 
[[A_BUF_SIZE:%.+]] = mul nuw i64 10, [[NUM_ELEMS:%[^,]+]] // float a_buffer[10][n]; @@ -28,7 +28,7 @@ #pragma omp for reduction(inscan, +:a[:n], b) for (int i = 0; i < 10; ++i) { // CHECK: call void @__kmpc_for_static_init_4( - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: store float 0.000000e+00, ptr % // CHECK: store double 0.000000e+00, ptr [[B_PRIV_ADDR:%.+]], // CHECK: br label %[[DISPATCH:[^,]+]] @@ -53,7 +53,7 @@ // CHECK: [[DISPATCH]]: // CHECK: br label %[[INPUT_PHASE]] // CHECK: [[LOOP_CONTINUE]]: - // CHECK: call void @llvm.stackrestore(ptr % + // CHECK: call void @llvm.stackrestore.p0(ptr % // CHECK: call void @__kmpc_for_static_fini( // CHECK: call void @__kmpc_barrier( foo(); @@ -116,7 +116,7 @@ // CHECK: [[OUTER_EXIT]]: bar(); // CHECK: call void @__kmpc_for_static_init_4( - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: store float 0.000000e+00, ptr % // CHECK: store double 0.000000e+00, ptr [[B_PRIV_ADDR:%.+]], // CHECK: br label %[[DISPATCH:[^,]+]] @@ -148,13 +148,13 @@ // CHECK: br label %[[EXIT_INSCAN]] // CHECK: [[LOOP_CONTINUE]]: - // CHECK: call void @llvm.stackrestore(ptr % + // CHECK: call void @llvm.stackrestore.p0(ptr % // CHECK: call void @__kmpc_for_static_fini( - // CHECK: call void @llvm.stackrestore(ptr + // CHECK: call void @llvm.stackrestore.p0(ptr // CHECK: call void @__kmpc_barrier( } - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: [[A_BUF_SIZE:%.+]] = mul nuw i64 10, [[NUM_ELEMS:%[^,]+]] // float a_buffer[10][n]; @@ -165,7 +165,7 @@ #pragma omp for reduction(inscan, +:a[:n], b) for (int i = 0; i < 10; ++i) { // CHECK: call void @__kmpc_for_static_init_4( - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: store float 0.000000e+00, ptr % // CHECK: store double 0.000000e+00, ptr [[B_PRIV_ADDR:%.+]], // CHECK: br label %[[DISPATCH:[^,]+]] @@ -198,7 +198,7 @@ // 
CHECK: br label %[[EXIT_INSCAN]] // CHECK: [[LOOP_CONTINUE]]: - // CHECK: call void @llvm.stackrestore(ptr % + // CHECK: call void @llvm.stackrestore.p0(ptr % // CHECK: call void @__kmpc_for_static_fini( // CHECK: call void @__kmpc_barrier( foo(); @@ -261,7 +261,7 @@ // CHECK: [[OUTER_EXIT]]: bar(); // CHECK: call void @__kmpc_for_static_init_4( - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: store float 0.000000e+00, ptr % // CHECK: store double 0.000000e+00, ptr [[B_PRIV_ADDR:%.+]], // CHECK: br label %[[DISPATCH:[^,]+]] @@ -292,9 +292,9 @@ // CHECK: br label %[[SCAN_PHASE]] // CHECK: [[LOOP_CONTINUE]]: - // CHECK: call void @llvm.stackrestore(ptr % + // CHECK: call void @llvm.stackrestore.p0(ptr % // CHECK: call void @__kmpc_for_static_fini( - // CHECK: call void @llvm.stackrestore(ptr + // CHECK: call void @llvm.stackrestore.p0(ptr // CHECK: call void @__kmpc_barrier( } } Index: clang/test/OpenMP/for_simd_scan_codegen.cpp =================================================================== --- clang/test/OpenMP/for_simd_scan_codegen.cpp +++ clang/test/OpenMP/for_simd_scan_codegen.cpp @@ -17,7 +17,7 @@ void baz(int n) { static float a[10]; static double b; - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: [[A_BUF_SIZE:%.+]] = mul nuw i64 10, [[NUM_ELEMS:%[^,]+]] // float a_buffer[10][n]; @@ -28,7 +28,7 @@ #pragma omp for simd reduction(inscan, +:a[:n], b) for (int i = 0; i < 10; ++i) { // CHECK: call void @__kmpc_for_static_init_4( - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: store float 0.000000e+00, ptr % // CHECK: store double 0.000000e+00, ptr [[B_PRIV_ADDR:%.+]], // CHECK: br label %[[DISPATCH:[^,]+]] @@ -53,7 +53,7 @@ // CHECK: [[DISPATCH]]: // CHECK: br label %[[INPUT_PHASE]] // CHECK: [[LOOP_CONTINUE]]: - // CHECK: call void @llvm.stackrestore(ptr % + // CHECK: call void @llvm.stackrestore.p0(ptr % // CHECK: call void 
@__kmpc_for_static_fini( // CHECK: call void @__kmpc_barrier( foo(); @@ -116,7 +116,7 @@ // CHECK: [[OUTER_EXIT]]: bar(); // CHECK: call void @__kmpc_for_static_init_4( - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: store float 0.000000e+00, ptr % // CHECK: store double 0.000000e+00, ptr [[B_PRIV_ADDR:%.+]], // CHECK: br label %[[DISPATCH:[^,]+]] @@ -148,13 +148,13 @@ // CHECK: br label %[[EXIT_INSCAN]] // CHECK: [[LOOP_CONTINUE]]: - // CHECK: call void @llvm.stackrestore(ptr % + // CHECK: call void @llvm.stackrestore.p0(ptr % // CHECK: call void @__kmpc_for_static_fini( - // CHECK: call void @llvm.stackrestore(ptr + // CHECK: call void @llvm.stackrestore.p0(ptr // CHECK: call void @__kmpc_barrier( } - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: [[A_BUF_SIZE:%.+]] = mul nuw i64 10, [[NUM_ELEMS:%[^,]+]] // float a_buffer[10][n]; @@ -165,7 +165,7 @@ #pragma omp for simd reduction(inscan, +:a[:n], b) for (int i = 0; i < 10; ++i) { // CHECK: call void @__kmpc_for_static_init_4( - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: store float 0.000000e+00, ptr % // CHECK: store double 0.000000e+00, ptr [[B_PRIV_ADDR:%.+]], // CHECK: br label %[[DISPATCH:[^,]+]] @@ -198,7 +198,7 @@ // CHECK: br label %[[EXIT_INSCAN]] // CHECK: [[LOOP_CONTINUE]]: - // CHECK: call void @llvm.stackrestore(ptr % + // CHECK: call void @llvm.stackrestore.p0(ptr % // CHECK: call void @__kmpc_for_static_fini( // CHECK: call void @__kmpc_barrier( foo(); @@ -261,7 +261,7 @@ // CHECK: [[OUTER_EXIT]]: bar(); // CHECK: call void @__kmpc_for_static_init_4( - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: store float 0.000000e+00, ptr % // CHECK: store double 0.000000e+00, ptr [[B_PRIV_ADDR:%.+]], // CHECK: br label %[[DISPATCH:[^,]+]] @@ -292,9 +292,9 @@ // CHECK: br label %[[SCAN_PHASE]] // CHECK: [[LOOP_CONTINUE]]: - // CHECK: call 
void @llvm.stackrestore(ptr % + // CHECK: call void @llvm.stackrestore.p0(ptr % // CHECK: call void @__kmpc_for_static_fini( - // CHECK: call void @llvm.stackrestore(ptr + // CHECK: call void @llvm.stackrestore.p0(ptr // CHECK: call void @__kmpc_barrier( } } Index: clang/test/OpenMP/master_taskloop_in_reduction_codegen.cpp =================================================================== --- clang/test/OpenMP/master_taskloop_in_reduction_codegen.cpp +++ clang/test/OpenMP/master_taskloop_in_reduction_codegen.cpp @@ -71,7 +71,7 @@ // CHECK1: arrayctor.cont: // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP2]], align 16 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 @@ -163,7 +163,7 @@ // CHECK1-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP0]]) // CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP43]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP43]]) // CHECK1-NEXT: [[ARRAY_BEGIN7:%.*]] = getelementptr inbounds [5 x %struct.S], ptr [[C]], i32 0, i32 0 // CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN7]], i64 5 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] Index: clang/test/OpenMP/master_taskloop_simd_in_reduction_codegen.cpp =================================================================== --- clang/test/OpenMP/master_taskloop_simd_in_reduction_codegen.cpp +++ clang/test/OpenMP/master_taskloop_simd_in_reduction_codegen.cpp @@ -71,7 +71,7 @@ // CHECK1: arrayctor.cont: // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 // 
CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP2]], align 16 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 @@ -163,7 +163,7 @@ // CHECK1-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP0]]) // CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP43]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP43]]) // CHECK1-NEXT: [[ARRAY_BEGIN7:%.*]] = getelementptr inbounds [5 x %struct.S], ptr [[C]], i32 0, i32 0 // CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN7]], i64 5 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] @@ -702,7 +702,7 @@ // CHECK3: arrayctor.cont: // CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP1]], align 16 // CHECK3-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -743,7 +743,7 @@ // CHECK3-NEXT: store i32 5, ptr [[I]], align 4 // CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP11]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP11]]) // CHECK3-NEXT: [[ARRAY_BEGIN6:%.*]] = getelementptr inbounds [5 x %struct.S], ptr [[C]], i32 0, i32 0 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN6]], i64 5 // CHECK3-NEXT: br label 
[[ARRAYDESTROY_BODY:%.*]] Index: clang/test/OpenMP/parallel_codegen.cpp =================================================================== --- clang/test/OpenMP/parallel_codegen.cpp +++ clang/test/OpenMP/parallel_codegen.cpp @@ -83,7 +83,7 @@ // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16 // CHECK1-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -94,7 +94,7 @@ // CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIPPcEiT_(ptr noundef [[TMP3]]) // CHECK1-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP4]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP4]]) // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK1-NEXT: ret i32 [[TMP5]] // @@ -157,13 +157,13 @@ // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA1:%.*]] = alloca i32, i64 [[TMP0]], align 16 // CHECK1-NEXT: store i64 [[TMP0]], ptr [[__VLA_EXPR0]], align 8 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 3, ptr @main.omp_outlined.1.omp_outlined, i64 [[TMP0]], ptr [[VLA1]], ptr [[GLOBAL]]) // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP2]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP2]]) // CHECK1-NEXT: ret void // // @@ -316,7 +316,7 @@ // CHECK2-NEXT: call void @llvm.dbg.declare(metadata ptr [[ARGV_ADDR]], metadata [[META20:![0-9]+]], metadata !DIExpression()), !dbg [[DBG21:![0-9]+]] // CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !dbg [[DBG22:![0-9]+]] // CHECK2-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64, !dbg [[DBG23:![0-9]+]] -// CHECK2-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave(), !dbg [[DBG23]] +// CHECK2-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0(), !dbg [[DBG23]] // CHECK2-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8, !dbg [[DBG23]] // CHECK2-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16, !dbg [[DBG23]] // CHECK2-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8, !dbg [[DBG23]] @@ -329,7 +329,7 @@ // CHECK2-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIPPcEiT_(ptr noundef [[TMP3]]), !dbg [[DBG36:![0-9]+]] // CHECK2-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4, !dbg [[DBG37:![0-9]+]] // CHECK2-NEXT: [[TMP4:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8, !dbg [[DBG38:![0-9]+]] -// CHECK2-NEXT: call void @llvm.stackrestore(ptr [[TMP4]]), !dbg [[DBG38]] +// CHECK2-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP4]]), !dbg [[DBG38]] // CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[RETVAL]], align 4, !dbg [[DBG38]] // CHECK2-NEXT: ret i32 [[TMP5]], !dbg [[DBG38]] // @@ -425,7 +425,7 @@ // CHECK2-NEXT: call void @llvm.dbg.declare(metadata ptr [[VLA_ADDR]], metadata [[META81:![0-9]+]], metadata !DIExpression()), !dbg [[DBG79]] // CHECK2-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !dbg [[DBG82:![0-9]+]] // CHECK2-NEXT: call void @llvm.dbg.declare(metadata ptr 
[[GLOBAL]], metadata [[META83:![0-9]+]], metadata !DIExpression()), !dbg [[DBG79]] -// CHECK2-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave(), !dbg [[DBG82]] +// CHECK2-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0(), !dbg [[DBG82]] // CHECK2-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 8, !dbg [[DBG82]] // CHECK2-NEXT: [[VLA1:%.*]] = alloca i32, i64 [[TMP0]], align 16, !dbg [[DBG82]] // CHECK2-NEXT: store i64 [[TMP0]], ptr [[__VLA_EXPR0]], align 8, !dbg [[DBG82]] @@ -433,7 +433,7 @@ // CHECK2-NEXT: call void @llvm.dbg.declare(metadata ptr [[VLA1]], metadata [[META85:![0-9]+]], metadata !DIExpression()), !dbg [[DBG79]] // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3:[0-9]+]], i32 3, ptr @main.omp_outlined_debug__.1.omp_outlined, i64 [[TMP0]], ptr [[VLA1]], ptr [[GLOBAL]]), !dbg [[DBG82]] // CHECK2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8, !dbg [[DBG86:![0-9]+]] -// CHECK2-NEXT: call void @llvm.stackrestore(ptr [[TMP2]]), !dbg [[DBG86]] +// CHECK2-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP2]]), !dbg [[DBG86]] // CHECK2-NEXT: ret void, !dbg [[DBG88:![0-9]+]] // // @@ -725,7 +725,7 @@ // CHECK3-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK3-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16 // CHECK3-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -743,7 +743,7 @@ // CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIPPcEiT_(ptr noundef [[TMP3]]) // CHECK3-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP4]]) +// CHECK3-NEXT: call void 
@llvm.stackrestore.p0(ptr [[TMP4]]) // CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK3-NEXT: ret i32 [[TMP5]] // @@ -865,7 +865,7 @@ // CHECK4-NEXT: call void @llvm.dbg.declare(metadata ptr [[ARGV_ADDR]], metadata [[META20:![0-9]+]], metadata !DIExpression()), !dbg [[DBG19]] // CHECK4-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !dbg [[DBG21:![0-9]+]] // CHECK4-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64, !dbg [[DBG21]] -// CHECK4-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave(), !dbg [[DBG21]] +// CHECK4-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0(), !dbg [[DBG21]] // CHECK4-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8, !dbg [[DBG21]] // CHECK4-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16, !dbg [[DBG21]] // CHECK4-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8, !dbg [[DBG21]] @@ -885,7 +885,7 @@ // CHECK4-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIPPcEiT_(ptr noundef [[TMP3]]), !dbg [[DBG31]] // CHECK4-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4, !dbg [[DBG31]] // CHECK4-NEXT: [[TMP4:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8, !dbg [[DBG32:![0-9]+]] -// CHECK4-NEXT: call void @llvm.stackrestore(ptr [[TMP4]]), !dbg [[DBG32]] +// CHECK4-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP4]]), !dbg [[DBG32]] // CHECK4-NEXT: [[TMP5:%.*]] = load i32, ptr [[RETVAL]], align 4, !dbg [[DBG32]] // CHECK4-NEXT: ret i32 [[TMP5]], !dbg [[DBG32]] // Index: clang/test/OpenMP/parallel_firstprivate_codegen.cpp =================================================================== --- clang/test/OpenMP/parallel_firstprivate_codegen.cpp +++ clang/test/OpenMP/parallel_firstprivate_codegen.cpp @@ -2623,7 +2623,7 @@ // CHECK17-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK17-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64 -// CHECK17-NEXT: [[TMP6:%.*]] = call ptr @llvm.stacksave() +// CHECK17-NEXT: [[TMP6:%.*]] = call ptr 
@llvm.stacksave.p0() // CHECK17-NEXT: store ptr [[TMP6]], ptr [[SAVED_STACK]], align 8 // CHECK17-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP3]], [[TMP5]] // CHECK17-NEXT: [[VLA:%.*]] = alloca double, i64 [[TMP7]], align 128 @@ -2634,7 +2634,7 @@ // CHECK17-NEXT: [[TMP10:%.*]] = load ptr, ptr [[A_ADDR]], align 8 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1:[0-9]+]], i32 8, ptr @_Z10array_funcPfP2StiPe.omp_outlined, ptr [[TMP8]], ptr [[N_ADDR]], i64 [[TMP1]], ptr [[TMP9]], ptr [[TMP10]], i64 [[TMP3]], i64 [[TMP5]], ptr [[VLA]]) // CHECK17-NEXT: [[TMP11:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(ptr [[TMP11]]) +// CHECK17-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP11]]) // CHECK17-NEXT: ret void // // @@ -2669,7 +2669,7 @@ // CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR3]], align 8 // CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[VLA_ADDR5]], align 8 // CHECK17-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VLA2_ADDR]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = call ptr @llvm.stacksave() +// CHECK17-NEXT: [[TMP5:%.*]] = call ptr @llvm.stacksave.p0() // CHECK17-NEXT: store ptr [[TMP5]], ptr [[SAVED_STACK]], align 8 // CHECK17-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP2]], [[TMP3]] // CHECK17-NEXT: [[VLA7:%.*]] = alloca double, i64 [[TMP6]], align 128 @@ -2685,7 +2685,7 @@ // CHECK17-NEXT: [[TMP12:%.*]] = load ptr, ptr [[VLA1_ADDR]], align 8 // CHECK17-NEXT: call void @_ZN2St7St_funcEPS_iPe(ptr nonnull align 4 dereferenceable(8) [[ARRAYIDX]], ptr [[TMP10]], i32 [[TMP11]], ptr [[TMP12]]) // CHECK17-NEXT: [[TMP13:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(ptr [[TMP13]]) +// CHECK17-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP13]]) // CHECK17-NEXT: ret void // // @@ -2710,7 +2710,7 @@ // CHECK17-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK17-NEXT: [[TMP5:%.*]] = zext 
i32 [[TMP4]] to i64 -// CHECK17-NEXT: [[TMP6:%.*]] = call ptr @llvm.stacksave() +// CHECK17-NEXT: [[TMP6:%.*]] = call ptr @llvm.stacksave.p0() // CHECK17-NEXT: store ptr [[TMP6]], ptr [[SAVED_STACK]], align 8 // CHECK17-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP3]], [[TMP5]] // CHECK17-NEXT: [[VLA:%.*]] = alloca double, i64 [[TMP7]], align 128 @@ -2724,7 +2724,7 @@ // CHECK17-NEXT: [[TMP10:%.*]] = load ptr, ptr [[S_ADDR]], align 8 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 8, ptr @_ZN2St7St_funcEPS_iPe.omp_outlined, i64 [[TMP1]], ptr [[TMP9]], ptr [[THIS1]], i64 [[TMP3]], i64 [[TMP5]], ptr [[VLA]], ptr [[N_ADDR]], ptr [[TMP10]]) // CHECK17-NEXT: [[TMP11:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(ptr [[TMP11]]) +// CHECK17-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP11]]) // CHECK17-NEXT: ret void // // Index: clang/test/OpenMP/parallel_for_codegen.cpp =================================================================== --- clang/test/OpenMP/parallel_for_codegen.cpp +++ clang/test/OpenMP/parallel_for_codegen.cpp @@ -1208,7 +1208,7 @@ // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 16 // CHECK1-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -1217,7 +1217,7 @@ // CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @_Z12parallel_forPfi.omp_outlined, ptr [[A_ADDR]], i64 [[TMP1]], i64 [[TMP4]]) // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP5]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP5]]) // CHECK1-NEXT: ret void // // @@ -1249,7 +1249,7 @@ // CHECK1-NEXT: store i32 16908288, ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA1:%.*]] = alloca float, i64 [[TMP1]], align 16 // CHECK1-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -1333,7 +1333,7 @@ // CHECK1: omp.dispatch.end: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) // CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP24]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP24]]) // CHECK1-NEXT: ret void // CHECK1: terminate.lpad: // CHECK1-NEXT: [[TMP25:%.*]] = landingpad { ptr, i32 } @@ -2318,7 +2318,7 @@ // CHECK2-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK2-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK2-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK2-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK2-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK2-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 16 // CHECK2-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -2327,7 +2327,7 @@ // CHECK2-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @_Z12parallel_forPfi.omp_outlined, ptr [[A_ADDR]], i64 [[TMP1]], i64 [[TMP4]]) // CHECK2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK2-NEXT: call void @llvm.stackrestore(ptr [[TMP5]]) +// CHECK2-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP5]]) // CHECK2-NEXT: ret void // // @@ -2359,7 +2359,7 @@ // CHECK2-NEXT: store i32 16908288, ptr [[DOTOMP_UB]], align 4 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK2-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK2-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK2-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK2-NEXT: [[VLA1:%.*]] = alloca float, i64 [[TMP1]], align 16 // CHECK2-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -2443,7 +2443,7 @@ // CHECK2: omp.dispatch.end: // CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) // CHECK2-NEXT: [[TMP24:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK2-NEXT: call void @llvm.stackrestore(ptr [[TMP24]]) +// CHECK2-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP24]]) // CHECK2-NEXT: ret void // CHECK2: terminate.lpad: // CHECK2-NEXT: [[TMP25:%.*]] = landingpad { ptr, i32 } @@ -3428,7 +3428,7 @@ // CHECK5-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !dbg [[DBG105:![0-9]+]] // CHECK5-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64, !dbg [[DBG105]] -// CHECK5-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave(), !dbg [[DBG105]] +// CHECK5-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0(), !dbg [[DBG105]] // CHECK5-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8, !dbg [[DBG105]] // CHECK5-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 16, !dbg [[DBG105]] // CHECK5-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8, !dbg [[DBG105]] @@ -3437,7 +3437,7 @@ // CHECK5-NEXT: 
[[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8, !dbg [[DBG106]] // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB32:[0-9]+]], i32 3, ptr @_Z12parallel_forPfi.omp_outlined, ptr [[A_ADDR]], i64 [[TMP1]], i64 [[TMP4]]), !dbg [[DBG106]] // CHECK5-NEXT: [[TMP5:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8, !dbg [[DBG107:![0-9]+]] -// CHECK5-NEXT: call void @llvm.stackrestore(ptr [[TMP5]]), !dbg [[DBG107]] +// CHECK5-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP5]]), !dbg [[DBG107]] // CHECK5-NEXT: ret void, !dbg [[DBG107]] // // @@ -3469,7 +3469,7 @@ // CHECK5-NEXT: store i32 16908288, ptr [[DOTOMP_UB]], align 4, !dbg [[DBG110]] // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4, !dbg [[DBG110]] // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4, !dbg [[DBG110]] -// CHECK5-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave(), !dbg [[DBG109]] +// CHECK5-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0(), !dbg [[DBG109]] // CHECK5-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8, !dbg [[DBG109]] // CHECK5-NEXT: [[VLA1:%.*]] = alloca float, i64 [[TMP1]], align 16, !dbg [[DBG109]] // CHECK5-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8, !dbg [[DBG109]] @@ -3553,7 +3553,7 @@ // CHECK5: omp.dispatch.end: // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB31:[0-9]+]], i32 [[TMP4]]), !dbg [[DBG109]] // CHECK5-NEXT: [[TMP24:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8, !dbg [[DBG109]] -// CHECK5-NEXT: call void @llvm.stackrestore(ptr [[TMP24]]), !dbg [[DBG109]] +// CHECK5-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP24]]), !dbg [[DBG109]] // CHECK5-NEXT: ret void, !dbg [[DBG111]] // CHECK5: terminate.lpad: // CHECK5-NEXT: [[TMP25:%.*]] = landingpad { ptr, i32 } @@ -4538,7 +4538,7 @@ // CHECK6-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK6-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK6-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK6-NEXT: [[TMP2:%.*]] = call ptr 
@llvm.stacksave() +// CHECK6-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK6-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK6-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 16 // CHECK6-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -4547,7 +4547,7 @@ // CHECK6-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @_Z12parallel_forPfi.omp_outlined, ptr [[A_ADDR]], i64 [[TMP1]], i64 [[TMP4]]) // CHECK6-NEXT: [[TMP5:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK6-NEXT: call void @llvm.stackrestore(ptr [[TMP5]]) +// CHECK6-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP5]]) // CHECK6-NEXT: ret void // // @@ -4579,7 +4579,7 @@ // CHECK6-NEXT: store i32 16908288, ptr [[DOTOMP_UB]], align 4 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK6-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK6-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK6-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK6-NEXT: [[VLA1:%.*]] = alloca float, i64 [[TMP1]], align 16 // CHECK6-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -4661,7 +4661,7 @@ // CHECK6: omp.dispatch.end: // CHECK6-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) // CHECK6-NEXT: [[TMP24:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK6-NEXT: call void @llvm.stackrestore(ptr [[TMP24]]) +// CHECK6-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP24]]) // CHECK6-NEXT: ret void // // Index: clang/test/OpenMP/parallel_for_reduction_task_codegen.cpp =================================================================== --- clang/test/OpenMP/parallel_for_reduction_task_codegen.cpp +++ clang/test/OpenMP/parallel_for_reduction_task_codegen.cpp @@ -99,7 +99,7 @@ // CHECK1-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (ptr 
getelementptr (i8, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP11:%.*]] = add nuw i64 [[TMP10]], 1 // CHECK1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP13:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP13:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP13]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP11]], align 16 // CHECK1-NEXT: store i64 [[TMP11]], ptr [[__VLA_EXPR0]], align 8 @@ -316,7 +316,7 @@ // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: [[TMP103:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP103]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP103]]) // CHECK1-NEXT: ret void // // Index: clang/test/OpenMP/parallel_for_scan_codegen.cpp =================================================================== --- clang/test/OpenMP/parallel_for_scan_codegen.cpp +++ clang/test/OpenMP/parallel_for_scan_codegen.cpp @@ -18,7 +18,7 @@ static float a[10]; static double b; - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: [[A_BUF_SIZE:%.+]] = mul nuw i64 10, [[NUM_ELEMS:%[^,]+]] // float a_buffer[10][n]; @@ -42,12 +42,12 @@ // double b_buffer[10]; // CHECK: [[B_BUF:%.+]] = alloca double, i64 10, // CHECK: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call( - // CHECK: call void @llvm.stackrestore(ptr + // CHECK: call void @llvm.stackrestore.p0(ptr #pragma omp parallel for reduction(inscan, +:a[:n], b) for (int i = 0; i < 10; ++i) { // CHECK: call void @__kmpc_for_static_init_4( - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: store float 0.000000e+00, ptr % // CHECK: store double 0.000000e+00, ptr [[B_PRIV_ADDR:%.+]], // CHECK: br label %[[DISPATCH:[^,]+]] @@ -72,7 +72,7 @@ // CHECK: [[DISPATCH]]: // CHECK: br label %[[INPUT_PHASE]] // CHECK: [[LOOP_CONTINUE]]: - // CHECK: call void @llvm.stackrestore(ptr % + // CHECK: call void @llvm.stackrestore.p0(ptr % // CHECK: call void @__kmpc_for_static_fini( // CHECK: call void @__kmpc_barrier( foo(n); @@ -135,7 +135,7 @@ // CHECK: [[OUTER_EXIT]]: bar(); // CHECK: call void @__kmpc_for_static_init_4( - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: store float 0.000000e+00, ptr % // CHECK: store double 0.000000e+00, ptr [[B_PRIV_ADDR:%.+]], // CHECK: br label %[[DISPATCH:[^,]+]] @@ -167,14 +167,14 @@ // CHECK: br label %[[EXIT_INSCAN]] // CHECK: [[LOOP_CONTINUE]]: - // CHECK: call void @llvm.stackrestore(ptr % + // CHECK: call void @llvm.stackrestore.p0(ptr % // CHECK: call void @__kmpc_for_static_fini( } #pragma omp parallel for reduction(inscan, +:a[:n], b) for (int i = 0; i < 10; ++i) { // CHECK: call void @__kmpc_for_static_init_4( - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: store float 0.000000e+00, ptr % // CHECK: store double 0.000000e+00, ptr [[B_PRIV_ADDR:%.+]], // CHECK: br label %[[DISPATCH:[^,]+]] @@ -207,7 +207,7 @@ // CHECK: br label %[[EXIT_INSCAN]] // CHECK: [[LOOP_CONTINUE]]: - // CHECK: call void @llvm.stackrestore(ptr % + // CHECK: call void @llvm.stackrestore.p0(ptr % // CHECK: call void @__kmpc_for_static_fini( // CHECK: call void @__kmpc_barrier( foo(n); @@ -270,7 +270,7 @@ // CHECK: [[OUTER_EXIT]]: bar(); 
// CHECK: call void @__kmpc_for_static_init_4( - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: store float 0.000000e+00, ptr % // CHECK: store double 0.000000e+00, ptr [[B_PRIV_ADDR:%.+]], // CHECK: br label %[[DISPATCH:[^,]+]] @@ -301,7 +301,7 @@ // CHECK: br label %[[SCAN_PHASE]] // CHECK: [[LOOP_CONTINUE]]: - // CHECK: call void @llvm.stackrestore(ptr % + // CHECK: call void @llvm.stackrestore.p0(ptr % // CHECK: call void @__kmpc_for_static_fini( } } Index: clang/test/OpenMP/parallel_for_simd_scan_codegen.cpp =================================================================== --- clang/test/OpenMP/parallel_for_simd_scan_codegen.cpp +++ clang/test/OpenMP/parallel_for_simd_scan_codegen.cpp @@ -18,7 +18,7 @@ static float a[10]; static double b; - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: [[A_BUF_SIZE:%.+]] = mul nuw i64 10, [[NUM_ELEMS:%[^,]+]] // float a_buffer[10][n]; @@ -35,12 +35,12 @@ // double b_buffer[10]; // CHECK: [[B_BUF:%.+]] = alloca double, i64 10, // CHECK: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call( - // CHECK: call void @llvm.stackrestore(ptr + // CHECK: call void @llvm.stackrestore.p0(ptr #pragma omp parallel for simd reduction(inscan, +:a[:n], b) for (int i = 0; i < 10; ++i) { // CHECK: call void @__kmpc_for_static_init_4( - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: store float 0.000000e+00, ptr % // CHECK: store double 0.000000e+00, ptr [[B_PRIV_ADDR:%.+]], // CHECK: br label %[[DISPATCH:[^,]+]] @@ -65,7 +65,7 @@ // CHECK: [[DISPATCH]]: // CHECK: br label %[[INPUT_PHASE]] // CHECK: [[LOOP_CONTINUE]]: - // CHECK: call void @llvm.stackrestore(ptr % + // CHECK: call void @llvm.stackrestore.p0(ptr % // CHECK: call void @__kmpc_for_static_fini( // CHECK: call void @__kmpc_barrier( foo(); @@ -128,7 +128,7 @@ // CHECK: [[OUTER_EXIT]]: bar(); // CHECK: call void @__kmpc_for_static_init_4( - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: store float 0.000000e+00, ptr % // CHECK: store double 0.000000e+00, ptr [[B_PRIV_ADDR:%.+]], // CHECK: br label %[[DISPATCH:[^,]+]] @@ -160,14 +160,14 @@ // CHECK: br label %[[EXIT_INSCAN]] // CHECK: [[LOOP_CONTINUE]]: - // CHECK: call void @llvm.stackrestore(ptr % + // CHECK: call void @llvm.stackrestore.p0(ptr % // CHECK: call void @__kmpc_for_static_fini( } #pragma omp parallel for simd reduction(inscan, +:a[:n], b) for (int i = 0; i < 10; ++i) { // CHECK: call void @__kmpc_for_static_init_4( - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: store float 0.000000e+00, ptr % // CHECK: store double 0.000000e+00, ptr [[B_PRIV_ADDR:%.+]], // CHECK: br label %[[DISPATCH:[^,]+]] @@ -200,7 +200,7 @@ // CHECK: br label %[[EXIT_INSCAN]] // CHECK: [[LOOP_CONTINUE]]: - // CHECK: call void @llvm.stackrestore(ptr % + // CHECK: call void @llvm.stackrestore.p0(ptr % // CHECK: call void @__kmpc_for_static_fini( // CHECK: call void @__kmpc_barrier( foo(); @@ -263,7 +263,7 @@ // CHECK: 
[[OUTER_EXIT]]: bar(); // CHECK: call void @__kmpc_for_static_init_4( - // CHECK: call ptr @llvm.stacksave() + // CHECK: call ptr @llvm.stacksave.p0() // CHECK: store float 0.000000e+00, ptr % // CHECK: store double 0.000000e+00, ptr [[B_PRIV_ADDR:%.+]], // CHECK: br label %[[DISPATCH:[^,]+]] @@ -294,7 +294,7 @@ // CHECK: br label %[[SCAN_PHASE]] // CHECK: [[LOOP_CONTINUE]]: - // CHECK: call void @llvm.stackrestore(ptr % + // CHECK: call void @llvm.stackrestore.p0(ptr % // CHECK: call void @__kmpc_for_static_fini( } } Index: clang/test/OpenMP/parallel_master_reduction_task_codegen.cpp =================================================================== --- clang/test/OpenMP/parallel_master_reduction_task_codegen.cpp +++ clang/test/OpenMP/parallel_master_reduction_task_codegen.cpp @@ -88,7 +88,7 @@ // CHECK1-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP11:%.*]] = add nuw i64 [[TMP10]], 1 // CHECK1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP13:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP13:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP13]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP11]], align 16 // CHECK1-NEXT: store i64 [[TMP11]], ptr [[__VLA_EXPR0]], align 8 @@ -271,7 +271,7 @@ // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: [[TMP96:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP96]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP96]]) // CHECK1-NEXT: ret void // // Index: clang/test/OpenMP/parallel_reduction_codegen.cpp =================================================================== --- clang/test/OpenMP/parallel_reduction_codegen.cpp +++ clang/test/OpenMP/parallel_reduction_codegen.cpp @@ -363,7 +363,7 @@ 
// CHECK1-NEXT: [[TMP5:%.*]] = sdiv exact i64 [[TMP4]], ptrtoint (ptr getelementptr (i16, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP6:%.*]] = add nuw i64 [[TMP5]], 1 // CHECK1-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], ptrtoint (ptr getelementptr (i16, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP8]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP6]], align 16 // CHECK1-NEXT: store i64 [[TMP6]], ptr [[__VLA_EXPR0]], align 8 @@ -452,7 +452,7 @@ // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP34]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP34]]) // CHECK1-NEXT: ret void // // @@ -1643,7 +1643,7 @@ // CHECK3-NEXT: [[TMP5:%.*]] = sdiv exact i64 [[TMP4]], ptrtoint (ptr getelementptr (i16, ptr null, i32 1) to i64) // CHECK3-NEXT: [[TMP6:%.*]] = add nuw i64 [[TMP5]], 1 // CHECK3-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], ptrtoint (ptr getelementptr (i16, ptr null, i32 1) to i64) -// CHECK3-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP8]], ptr [[SAVED_STACK]], align 8 // CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP6]], align 16 // CHECK3-NEXT: store i64 [[TMP6]], ptr [[__VLA_EXPR0]], align 8 @@ -1732,7 +1732,7 @@ // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.default: // CHECK3-NEXT: [[TMP34:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP34]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP34]]) // CHECK3-NEXT: ret void // // @@ -2153,7 +2153,7 @@ // CHECK4-NEXT: [[TMP5:%.*]] = sdiv exact i64 [[TMP4]], ptrtoint (ptr getelementptr 
(i16, ptr null, i32 1) to i64) // CHECK4-NEXT: [[TMP6:%.*]] = add nuw i64 [[TMP5]], 1 // CHECK4-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], ptrtoint (ptr getelementptr (i16, ptr null, i32 1) to i64) -// CHECK4-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave() +// CHECK4-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave.p0() // CHECK4-NEXT: store ptr [[TMP8]], ptr [[SAVED_STACK]], align 8 // CHECK4-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP6]], align 16 // CHECK4-NEXT: store i64 [[TMP6]], ptr [[__VLA_EXPR0]], align 8 @@ -2242,7 +2242,7 @@ // CHECK4-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK4: .omp.reduction.default: // CHECK4-NEXT: [[TMP34:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK4-NEXT: call void @llvm.stackrestore(ptr [[TMP34]]) +// CHECK4-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP34]]) // CHECK4-NEXT: ret void // // Index: clang/test/OpenMP/parallel_reduction_task_codegen.cpp =================================================================== --- clang/test/OpenMP/parallel_reduction_task_codegen.cpp +++ clang/test/OpenMP/parallel_reduction_task_codegen.cpp @@ -88,7 +88,7 @@ // CHECK1-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP11:%.*]] = add nuw i64 [[TMP10]], 1 // CHECK1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP13:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP13:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP13]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP11]], align 16 // CHECK1-NEXT: store i64 [[TMP11]], ptr [[__VLA_EXPR0]], align 8 @@ -262,7 +262,7 @@ // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: [[TMP92:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP92]]) +// CHECK1-NEXT: call void 
@llvm.stackrestore.p0(ptr [[TMP92]]) // CHECK1-NEXT: ret void // // Index: clang/test/OpenMP/parallel_sections_reduction_task_codegen.cpp =================================================================== --- clang/test/OpenMP/parallel_sections_reduction_task_codegen.cpp +++ clang/test/OpenMP/parallel_sections_reduction_task_codegen.cpp @@ -97,7 +97,7 @@ // CHECK1-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP11:%.*]] = add nuw i64 [[TMP10]], 1 // CHECK1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP13:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP13:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP13]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP11]], align 16 // CHECK1-NEXT: store i64 [[TMP11]], ptr [[__VLA_EXPR0]], align 8 @@ -304,7 +304,7 @@ // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: [[TMP104:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP104]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP104]]) // CHECK1-NEXT: ret void // // Index: clang/test/OpenMP/reduction_implicit_map.cpp =================================================================== --- clang/test/OpenMP/reduction_implicit_map.cpp +++ clang/test/OpenMP/reduction_implicit_map.cpp @@ -545,7 +545,7 @@ // CHECK1-NEXT: [[TMP4:%.*]] = sdiv exact i64 [[TMP3]], ptrtoint (ptr getelementptr (double, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP5:%.*]] = add nuw i64 [[TMP4]], 1 // CHECK1-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], ptrtoint (ptr getelementptr (double, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP7:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP7:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP7]], ptr 
[[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca double, i64 [[TMP5]], align 8 // CHECK1-NEXT: store i64 [[TMP5]], ptr [[__VLA_EXPR0]], align 8 @@ -692,7 +692,7 @@ // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: [[TMP55:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP55]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP55]]) // CHECK1-NEXT: ret void // // Index: clang/test/OpenMP/sections_reduction_task_codegen.cpp =================================================================== --- clang/test/OpenMP/sections_reduction_task_codegen.cpp +++ clang/test/OpenMP/sections_reduction_task_codegen.cpp @@ -98,7 +98,7 @@ // CHECK1-NEXT: [[TMP11:%.*]] = sdiv exact i64 [[TMP10]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP12:%.*]] = add nuw i64 [[TMP11]], 1 // CHECK1-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP14:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP14:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP14]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP12]], align 16 // CHECK1-NEXT: store i64 [[TMP12]], ptr [[__VLA_EXPR0]], align 8 @@ -306,7 +306,7 @@ // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: [[TMP105:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP105]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP105]]) // CHECK1-NEXT: [[TMP106:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: [[TMP107:%.*]] = load i32, ptr [[TMP106]], align 4 // CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4:[0-9]+]], i32 [[TMP107]]) Index: clang/test/OpenMP/target_codegen.cpp 
=================================================================== --- clang/test/OpenMP/target_codegen.cpp +++ clang/test/OpenMP/target_codegen.cpp @@ -531,7 +531,7 @@ // // CHECK: define {{.*}}[[FS1]] // -// CHECK: ptr @llvm.stacksave() +// CHECK: ptr @llvm.stacksave.p0() // CHECK-64: store i32 %{{.+}}, ptr [[B_CADDR:%.+]], // CHECK-64: [[B_CVAL:%.+]] = load i[[SZ]], ptr [[B_CADDR]], Index: clang/test/OpenMP/target_firstprivate_codegen.cpp =================================================================== --- clang/test/OpenMP/target_firstprivate_codegen.cpp +++ clang/test/OpenMP/target_firstprivate_codegen.cpp @@ -6115,7 +6115,7 @@ // CHECK0-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK0-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK0-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK0-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK0-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK0-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK0-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 // CHECK0-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -6342,7 +6342,7 @@ // CHECK0: omp_offload.cont13: // CHECK0-NEXT: [[TMP115:%.*]] = load i32, ptr [[A]], align 4 // CHECK0-NEXT: [[TMP116:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK0-NEXT: call void @llvm.stackrestore(ptr [[TMP116]]) +// CHECK0-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP116]]) // CHECK0-NEXT: ret i32 [[TMP115]] // // @@ -6395,7 +6395,7 @@ // CHECK0-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK0-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // CHECK0-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[B5]], ptr align 4 [[TMP0]], i64 40, i1 false) -// CHECK0-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave() +// CHECK0-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave.p0() // CHECK0-NEXT: store ptr [[TMP8]], ptr [[SAVED_STACK]], align 8 // CHECK0-NEXT: [[VLA6:%.*]] = alloca float, i64 [[TMP1]], align 
4 // CHECK0-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -6431,7 +6431,7 @@ // CHECK0-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D9]], i32 0, i32 1 // CHECK0-NEXT: store i8 1, ptr [[Y]], align 8 // CHECK0-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK0-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) +// CHECK0-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP15]]) // CHECK0-NEXT: ret void // // @@ -6516,7 +6516,7 @@ // CHECK0-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK0-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK0-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK0-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK0-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK0-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK0-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK0-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 @@ -6604,7 +6604,7 @@ // CHECK0-NEXT: [[TMP45:%.*]] = load i32, ptr [[B]], align 4 // CHECK0-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP45]] // CHECK0-NEXT: [[TMP46:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK0-NEXT: call void @llvm.stackrestore(ptr [[TMP46]]) +// CHECK0-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP46]]) // CHECK0-NEXT: ret i32 [[ADD3]] // // @@ -6774,7 +6774,7 @@ // CHECK0-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 // CHECK0-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 // CHECK0-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK0-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// CHECK0-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // CHECK0-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 // CHECK0-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], [[TMP2]] // CHECK0-NEXT: [[VLA3:%.*]] = alloca i16, i64 [[TMP5]], align 2 @@ -6798,7 +6798,7 @@ // CHECK0-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 // CHECK0-NEXT: 
store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2 // CHECK0-NEXT: [[TMP11:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK0-NEXT: call void @llvm.stackrestore(ptr [[TMP11]]) +// CHECK0-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP11]]) // CHECK0-NEXT: ret void // // @@ -6893,7 +6893,7 @@ // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 // CHECK1-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -7120,7 +7120,7 @@ // CHECK1: omp_offload.cont13: // CHECK1-NEXT: [[TMP115:%.*]] = load i32, ptr [[A]], align 4 // CHECK1-NEXT: [[TMP116:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP116]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP116]]) // CHECK1-NEXT: ret i32 [[TMP115]] // // @@ -7173,7 +7173,7 @@ // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[B5]], ptr align 4 [[TMP0]], i64 40, i1 false) -// CHECK1-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP8]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA6:%.*]] = alloca float, i64 [[TMP1]], align 4 // CHECK1-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -7209,7 +7209,7 @@ // CHECK1-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D9]], i32 0, i32 1 // CHECK1-NEXT: store i8 1, ptr [[Y]], align 8 // CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr 
[[TMP15]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP15]]) // CHECK1-NEXT: ret void // // @@ -7294,7 +7294,7 @@ // CHECK1-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 @@ -7382,7 +7382,7 @@ // CHECK1-NEXT: [[TMP45:%.*]] = load i32, ptr [[B]], align 4 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP45]] // CHECK1-NEXT: [[TMP46:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP46]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP46]]) // CHECK1-NEXT: ret i32 [[ADD3]] // // @@ -7552,7 +7552,7 @@ // CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], [[TMP2]] // CHECK1-NEXT: [[VLA3:%.*]] = alloca i16, i64 [[TMP5]], align 2 @@ -7576,7 +7576,7 @@ // CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 // CHECK1-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2 // CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP11]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP11]]) // CHECK1-NEXT: ret void // // @@ -7670,7 +7670,7 @@ // CHECK2-NEXT: store i32 0, ptr [[A]], align 4 // 
CHECK2-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK2-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK2-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK2-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK2-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4 // CHECK2-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -7898,7 +7898,7 @@ // CHECK2: omp_offload.cont13: // CHECK2-NEXT: [[TMP115:%.*]] = load i32, ptr [[A]], align 4 // CHECK2-NEXT: [[TMP116:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK2-NEXT: call void @llvm.stackrestore(ptr [[TMP116]]) +// CHECK2-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP116]]) // CHECK2-NEXT: ret i32 [[TMP115]] // // @@ -7951,7 +7951,7 @@ // CHECK2-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 // CHECK2-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[B5]], ptr align 4 [[TMP0]], i32 40, i1 false) -// CHECK2-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave() +// CHECK2-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave.p0() // CHECK2-NEXT: store ptr [[TMP8]], ptr [[SAVED_STACK]], align 4 // CHECK2-NEXT: [[VLA6:%.*]] = alloca float, i32 [[TMP1]], align 4 // CHECK2-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 @@ -7987,7 +7987,7 @@ // CHECK2-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D9]], i32 0, i32 1 // CHECK2-NEXT: store i8 1, ptr [[Y]], align 4 // CHECK2-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK2-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) +// CHECK2-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP15]]) // CHECK2-NEXT: ret void // // @@ -8071,7 +8071,7 @@ // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK2-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK2-NEXT: [[TMP2:%.*]] = call ptr 
@llvm.stacksave() +// CHECK2-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK2-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK2-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK2-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 @@ -8160,7 +8160,7 @@ // CHECK2-NEXT: [[TMP45:%.*]] = load i32, ptr [[B]], align 4 // CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP45]] // CHECK2-NEXT: [[TMP46:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK2-NEXT: call void @llvm.stackrestore(ptr [[TMP46]]) +// CHECK2-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP46]]) // CHECK2-NEXT: ret i32 [[ADD3]] // // @@ -8330,7 +8330,7 @@ // CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK2-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// CHECK2-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // CHECK2-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 4 // CHECK2-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP1]], [[TMP2]] // CHECK2-NEXT: [[VLA3:%.*]] = alloca i16, i32 [[TMP5]], align 2 @@ -8354,7 +8354,7 @@ // CHECK2-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 // CHECK2-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2 // CHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK2-NEXT: call void @llvm.stackrestore(ptr [[TMP11]]) +// CHECK2-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP11]]) // CHECK2-NEXT: ret void // // @@ -8448,7 +8448,7 @@ // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[VLA:%.*]] = alloca float, i32 
[[TMP0]], align 4 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -8676,7 +8676,7 @@ // CHECK3: omp_offload.cont13: // CHECK3-NEXT: [[TMP115:%.*]] = load i32, ptr [[A]], align 4 // CHECK3-NEXT: [[TMP116:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP116]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP116]]) // CHECK3-NEXT: ret i32 [[TMP115]] // // @@ -8729,7 +8729,7 @@ // CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[B5]], ptr align 4 [[TMP0]], i32 40, i1 false) -// CHECK3-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP8]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[VLA6:%.*]] = alloca float, i32 [[TMP1]], align 4 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 @@ -8765,7 +8765,7 @@ // CHECK3-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D9]], i32 0, i32 1 // CHECK3-NEXT: store i8 1, ptr [[Y]], align 4 // CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP15]]) // CHECK3-NEXT: ret void // // @@ -8849,7 +8849,7 @@ // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 @@ -8938,7 +8938,7 @@ // CHECK3-NEXT: [[TMP45:%.*]] = load i32, ptr [[B]], align 4 // 
CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP45]] // CHECK3-NEXT: [[TMP46:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP46]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP46]]) // CHECK3-NEXT: ret i32 [[ADD3]] // // @@ -9108,7 +9108,7 @@ // CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP1]], [[TMP2]] // CHECK3-NEXT: [[VLA3:%.*]] = alloca i16, i32 [[TMP5]], align 2 @@ -9132,7 +9132,7 @@ // CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 // CHECK3-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2 // CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP11]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP11]]) // CHECK3-NEXT: ret void // // @@ -9211,7 +9211,7 @@ // SIMD-ONLY0-NEXT: store i16 0, ptr [[AA]], align 2 // SIMD-ONLY0-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 // SIMD-ONLY0-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// SIMD-ONLY0-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// SIMD-ONLY0-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // SIMD-ONLY0-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // SIMD-ONLY0-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 // SIMD-ONLY0-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -9260,7 +9260,7 @@ // SIMD-ONLY0-NEXT: store double [[INC]], ptr [[ARRAYIDX13]], align 8 // SIMD-ONLY0-NEXT: [[TMP14:%.*]] = load i32, ptr [[A]], align 4 // SIMD-ONLY0-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], 
align 8 -// SIMD-ONLY0-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) +// SIMD-ONLY0-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP15]]) // SIMD-ONLY0-NEXT: ret i32 [[TMP14]] // // @@ -9315,7 +9315,7 @@ // SIMD-ONLY0-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // SIMD-ONLY0-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // SIMD-ONLY0-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// SIMD-ONLY0-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// SIMD-ONLY0-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // SIMD-ONLY0-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // SIMD-ONLY0-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // SIMD-ONLY0-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 @@ -9342,7 +9342,7 @@ // SIMD-ONLY0-NEXT: [[TMP10:%.*]] = load i32, ptr [[B]], align 4 // SIMD-ONLY0-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP10]] // SIMD-ONLY0-NEXT: [[TMP11:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// SIMD-ONLY0-NEXT: call void @llvm.stackrestore(ptr [[TMP11]]) +// SIMD-ONLY0-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP11]]) // SIMD-ONLY0-NEXT: ret i32 [[ADD9]] // // @@ -9412,7 +9412,7 @@ // SIMD-ONLY01-NEXT: store i16 0, ptr [[AA]], align 2 // SIMD-ONLY01-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 // SIMD-ONLY01-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// SIMD-ONLY01-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// SIMD-ONLY01-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // SIMD-ONLY01-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // SIMD-ONLY01-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 // SIMD-ONLY01-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -9461,7 +9461,7 @@ // SIMD-ONLY01-NEXT: store double [[INC]], ptr [[ARRAYIDX13]], align 8 // SIMD-ONLY01-NEXT: [[TMP14:%.*]] = load i32, ptr [[A]], align 4 // SIMD-ONLY01-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// SIMD-ONLY01-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) +// SIMD-ONLY01-NEXT: 
call void @llvm.stackrestore.p0(ptr [[TMP15]]) // SIMD-ONLY01-NEXT: ret i32 [[TMP14]] // // @@ -9516,7 +9516,7 @@ // SIMD-ONLY01-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // SIMD-ONLY01-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // SIMD-ONLY01-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// SIMD-ONLY01-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// SIMD-ONLY01-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // SIMD-ONLY01-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // SIMD-ONLY01-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // SIMD-ONLY01-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 @@ -9543,7 +9543,7 @@ // SIMD-ONLY01-NEXT: [[TMP10:%.*]] = load i32, ptr [[B]], align 4 // SIMD-ONLY01-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP10]] // SIMD-ONLY01-NEXT: [[TMP11:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// SIMD-ONLY01-NEXT: call void @llvm.stackrestore(ptr [[TMP11]]) +// SIMD-ONLY01-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP11]]) // SIMD-ONLY01-NEXT: ret i32 [[ADD9]] // // @@ -9612,7 +9612,7 @@ // SIMD-ONLY02-NEXT: store i32 0, ptr [[A]], align 4 // SIMD-ONLY02-NEXT: store i16 0, ptr [[AA]], align 2 // SIMD-ONLY02-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// SIMD-ONLY02-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// SIMD-ONLY02-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // SIMD-ONLY02-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // SIMD-ONLY02-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4 // SIMD-ONLY02-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -9660,7 +9660,7 @@ // SIMD-ONLY02-NEXT: store double [[INC]], ptr [[ARRAYIDX13]], align 4 // SIMD-ONLY02-NEXT: [[TMP12:%.*]] = load i32, ptr [[A]], align 4 // SIMD-ONLY02-NEXT: [[TMP13:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// SIMD-ONLY02-NEXT: call void @llvm.stackrestore(ptr [[TMP13]]) +// SIMD-ONLY02-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP13]]) // SIMD-ONLY02-NEXT: ret i32 [[TMP12]] // 
// @@ -9714,7 +9714,7 @@ // SIMD-ONLY02-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // SIMD-ONLY02-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // SIMD-ONLY02-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// SIMD-ONLY02-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// SIMD-ONLY02-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // SIMD-ONLY02-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // SIMD-ONLY02-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // SIMD-ONLY02-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 @@ -9741,7 +9741,7 @@ // SIMD-ONLY02-NEXT: [[TMP9:%.*]] = load i32, ptr [[B]], align 4 // SIMD-ONLY02-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP9]] // SIMD-ONLY02-NEXT: [[TMP10:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// SIMD-ONLY02-NEXT: call void @llvm.stackrestore(ptr [[TMP10]]) +// SIMD-ONLY02-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP10]]) // SIMD-ONLY02-NEXT: ret i32 [[ADD9]] // // @@ -9810,7 +9810,7 @@ // SIMD-ONLY03-NEXT: store i32 0, ptr [[A]], align 4 // SIMD-ONLY03-NEXT: store i16 0, ptr [[AA]], align 2 // SIMD-ONLY03-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// SIMD-ONLY03-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// SIMD-ONLY03-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // SIMD-ONLY03-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // SIMD-ONLY03-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4 // SIMD-ONLY03-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -9858,7 +9858,7 @@ // SIMD-ONLY03-NEXT: store double [[INC]], ptr [[ARRAYIDX13]], align 4 // SIMD-ONLY03-NEXT: [[TMP12:%.*]] = load i32, ptr [[A]], align 4 // SIMD-ONLY03-NEXT: [[TMP13:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// SIMD-ONLY03-NEXT: call void @llvm.stackrestore(ptr [[TMP13]]) +// SIMD-ONLY03-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP13]]) // SIMD-ONLY03-NEXT: ret i32 [[TMP12]] // // @@ -9912,7 +9912,7 @@ // SIMD-ONLY03-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // 
SIMD-ONLY03-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // SIMD-ONLY03-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// SIMD-ONLY03-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// SIMD-ONLY03-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // SIMD-ONLY03-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // SIMD-ONLY03-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // SIMD-ONLY03-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 @@ -9939,7 +9939,7 @@ // SIMD-ONLY03-NEXT: [[TMP9:%.*]] = load i32, ptr [[B]], align 4 // SIMD-ONLY03-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP9]] // SIMD-ONLY03-NEXT: [[TMP10:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// SIMD-ONLY03-NEXT: call void @llvm.stackrestore(ptr [[TMP10]]) +// SIMD-ONLY03-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP10]]) // SIMD-ONLY03-NEXT: ret i32 [[ADD9]] // // @@ -10037,7 +10037,7 @@ // TCHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // TCHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // TCHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[B5]], ptr align 4 [[TMP0]], i64 40, i1 false) -// TCHECK-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave() +// TCHECK-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave.p0() // TCHECK-NEXT: store ptr [[TMP8]], ptr [[SAVED_STACK]], align 8 // TCHECK-NEXT: [[VLA6:%.*]] = alloca float, i64 [[TMP1]], align 4 // TCHECK-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -10073,7 +10073,7 @@ // TCHECK-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D9]], i32 0, i32 1 // TCHECK-NEXT: store i8 1, ptr [[Y]], align 8 // TCHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// TCHECK-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) +// TCHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP15]]) // TCHECK-NEXT: ret void // // @@ -10146,7 +10146,7 @@ // TCHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 // TCHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 // 
TCHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// TCHECK-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// TCHECK-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // TCHECK-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 // TCHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], [[TMP2]] // TCHECK-NEXT: [[VLA3:%.*]] = alloca i16, i64 [[TMP5]], align 2 @@ -10170,7 +10170,7 @@ // TCHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 // TCHECK-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2 // TCHECK-NEXT: [[TMP11:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// TCHECK-NEXT: call void @llvm.stackrestore(ptr [[TMP11]]) +// TCHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP11]]) // TCHECK-NEXT: ret void // // @@ -10243,7 +10243,7 @@ // TCHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // TCHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // TCHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[B5]], ptr align 4 [[TMP0]], i64 40, i1 false) -// TCHECK1-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave() +// TCHECK1-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave.p0() // TCHECK1-NEXT: store ptr [[TMP8]], ptr [[SAVED_STACK]], align 8 // TCHECK1-NEXT: [[VLA6:%.*]] = alloca float, i64 [[TMP1]], align 4 // TCHECK1-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -10279,7 +10279,7 @@ // TCHECK1-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D9]], i32 0, i32 1 // TCHECK1-NEXT: store i8 1, ptr [[Y]], align 8 // TCHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// TCHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) +// TCHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP15]]) // TCHECK1-NEXT: ret void // // @@ -10352,7 +10352,7 @@ // TCHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 // TCHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 // TCHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// 
TCHECK1-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// TCHECK1-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // TCHECK1-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 // TCHECK1-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], [[TMP2]] // TCHECK1-NEXT: [[VLA3:%.*]] = alloca i16, i64 [[TMP5]], align 2 @@ -10376,7 +10376,7 @@ // TCHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 // TCHECK1-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2 // TCHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// TCHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP11]]) +// TCHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP11]]) // TCHECK1-NEXT: ret void // // @@ -10449,7 +10449,7 @@ // TCHECK2-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // TCHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 // TCHECK2-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[B5]], ptr align 4 [[TMP0]], i32 40, i1 false) -// TCHECK2-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave() +// TCHECK2-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave.p0() // TCHECK2-NEXT: store ptr [[TMP8]], ptr [[SAVED_STACK]], align 4 // TCHECK2-NEXT: [[VLA6:%.*]] = alloca float, i32 [[TMP1]], align 4 // TCHECK2-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 @@ -10485,7 +10485,7 @@ // TCHECK2-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D9]], i32 0, i32 1 // TCHECK2-NEXT: store i8 1, ptr [[Y]], align 4 // TCHECK2-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// TCHECK2-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) +// TCHECK2-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP15]]) // TCHECK2-NEXT: ret void // // @@ -10558,7 +10558,7 @@ // TCHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 // TCHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 // TCHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// TCHECK2-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// 
TCHECK2-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // TCHECK2-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 4 // TCHECK2-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP1]], [[TMP2]] // TCHECK2-NEXT: [[VLA3:%.*]] = alloca i16, i32 [[TMP5]], align 2 @@ -10582,7 +10582,7 @@ // TCHECK2-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 // TCHECK2-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2 // TCHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// TCHECK2-NEXT: call void @llvm.stackrestore(ptr [[TMP11]]) +// TCHECK2-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP11]]) // TCHECK2-NEXT: ret void // // @@ -10655,7 +10655,7 @@ // TCHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // TCHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 // TCHECK3-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[B5]], ptr align 4 [[TMP0]], i32 40, i1 false) -// TCHECK3-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave() +// TCHECK3-NEXT: [[TMP8:%.*]] = call ptr @llvm.stacksave.p0() // TCHECK3-NEXT: store ptr [[TMP8]], ptr [[SAVED_STACK]], align 4 // TCHECK3-NEXT: [[VLA6:%.*]] = alloca float, i32 [[TMP1]], align 4 // TCHECK3-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 @@ -10691,7 +10691,7 @@ // TCHECK3-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D9]], i32 0, i32 1 // TCHECK3-NEXT: store i8 1, ptr [[Y]], align 4 // TCHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// TCHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) +// TCHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP15]]) // TCHECK3-NEXT: ret void // // @@ -10764,7 +10764,7 @@ // TCHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 // TCHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 // TCHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// TCHECK3-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// TCHECK3-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() 
// TCHECK3-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 4 // TCHECK3-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP1]], [[TMP2]] // TCHECK3-NEXT: [[VLA3:%.*]] = alloca i16, i32 [[TMP5]], align 2 @@ -10788,7 +10788,7 @@ // TCHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 // TCHECK3-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2 // TCHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// TCHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP11]]) +// TCHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP11]]) // TCHECK3-NEXT: ret void // // @@ -10833,7 +10833,7 @@ // SIMD-ONLY1-NEXT: store i16 0, ptr [[AA]], align 2 // SIMD-ONLY1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 // SIMD-ONLY1-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// SIMD-ONLY1-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// SIMD-ONLY1-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // SIMD-ONLY1-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // SIMD-ONLY1-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 // SIMD-ONLY1-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -10882,7 +10882,7 @@ // SIMD-ONLY1-NEXT: store double [[INC]], ptr [[ARRAYIDX13]], align 8 // SIMD-ONLY1-NEXT: [[TMP14:%.*]] = load i32, ptr [[A]], align 4 // SIMD-ONLY1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// SIMD-ONLY1-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) +// SIMD-ONLY1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP15]]) // SIMD-ONLY1-NEXT: ret i32 [[TMP14]] // // @@ -10937,7 +10937,7 @@ // SIMD-ONLY1-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // SIMD-ONLY1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // SIMD-ONLY1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// SIMD-ONLY1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// SIMD-ONLY1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // SIMD-ONLY1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // SIMD-ONLY1-NEXT: [[TMP4:%.*]] 
= mul nuw i64 2, [[TMP2]] // SIMD-ONLY1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 @@ -10964,7 +10964,7 @@ // SIMD-ONLY1-NEXT: [[TMP10:%.*]] = load i32, ptr [[B]], align 4 // SIMD-ONLY1-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP10]] // SIMD-ONLY1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// SIMD-ONLY1-NEXT: call void @llvm.stackrestore(ptr [[TMP11]]) +// SIMD-ONLY1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP11]]) // SIMD-ONLY1-NEXT: ret i32 [[ADD9]] // // @@ -11034,7 +11034,7 @@ // SIMD-ONLY11-NEXT: store i16 0, ptr [[AA]], align 2 // SIMD-ONLY11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 // SIMD-ONLY11-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// SIMD-ONLY11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// SIMD-ONLY11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // SIMD-ONLY11-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // SIMD-ONLY11-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 // SIMD-ONLY11-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -11083,7 +11083,7 @@ // SIMD-ONLY11-NEXT: store double [[INC]], ptr [[ARRAYIDX13]], align 8 // SIMD-ONLY11-NEXT: [[TMP14:%.*]] = load i32, ptr [[A]], align 4 // SIMD-ONLY11-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// SIMD-ONLY11-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) +// SIMD-ONLY11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP15]]) // SIMD-ONLY11-NEXT: ret i32 [[TMP14]] // // @@ -11138,7 +11138,7 @@ // SIMD-ONLY11-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // SIMD-ONLY11-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // SIMD-ONLY11-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// SIMD-ONLY11-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// SIMD-ONLY11-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // SIMD-ONLY11-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // SIMD-ONLY11-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // SIMD-ONLY11-NEXT: [[VLA:%.*]] = alloca i16, i64 
[[TMP4]], align 2 @@ -11165,7 +11165,7 @@ // SIMD-ONLY11-NEXT: [[TMP10:%.*]] = load i32, ptr [[B]], align 4 // SIMD-ONLY11-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP10]] // SIMD-ONLY11-NEXT: [[TMP11:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// SIMD-ONLY11-NEXT: call void @llvm.stackrestore(ptr [[TMP11]]) +// SIMD-ONLY11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP11]]) // SIMD-ONLY11-NEXT: ret i32 [[ADD9]] // // @@ -11234,7 +11234,7 @@ // SIMD-ONLY12-NEXT: store i32 0, ptr [[A]], align 4 // SIMD-ONLY12-NEXT: store i16 0, ptr [[AA]], align 2 // SIMD-ONLY12-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// SIMD-ONLY12-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// SIMD-ONLY12-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // SIMD-ONLY12-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // SIMD-ONLY12-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4 // SIMD-ONLY12-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -11282,7 +11282,7 @@ // SIMD-ONLY12-NEXT: store double [[INC]], ptr [[ARRAYIDX13]], align 4 // SIMD-ONLY12-NEXT: [[TMP12:%.*]] = load i32, ptr [[A]], align 4 // SIMD-ONLY12-NEXT: [[TMP13:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// SIMD-ONLY12-NEXT: call void @llvm.stackrestore(ptr [[TMP13]]) +// SIMD-ONLY12-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP13]]) // SIMD-ONLY12-NEXT: ret i32 [[TMP12]] // // @@ -11336,7 +11336,7 @@ // SIMD-ONLY12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // SIMD-ONLY12-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // SIMD-ONLY12-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// SIMD-ONLY12-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// SIMD-ONLY12-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // SIMD-ONLY12-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // SIMD-ONLY12-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // SIMD-ONLY12-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 @@ -11363,7 +11363,7 @@ // SIMD-ONLY12-NEXT: [[TMP9:%.*]] = load 
i32, ptr [[B]], align 4 // SIMD-ONLY12-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[TMP9]] // SIMD-ONLY12-NEXT: [[TMP10:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// SIMD-ONLY12-NEXT: call void @llvm.stackrestore(ptr [[TMP10]]) +// SIMD-ONLY12-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP10]]) // SIMD-ONLY12-NEXT: ret i32 [[ADD9]] // // @@ -11432,7 +11432,7 @@ // SIMD-ONLY13-NEXT: store i32 0, ptr [[A]], align 4 // SIMD-ONLY13-NEXT: store i16 0, ptr [[AA]], align 2 // SIMD-ONLY13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// SIMD-ONLY13-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// SIMD-ONLY13-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // SIMD-ONLY13-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // SIMD-ONLY13-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4 // SIMD-ONLY13-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -11480,7 +11480,7 @@ // SIMD-ONLY13-NEXT: store double [[INC]], ptr [[ARRAYIDX13]], align 4 // SIMD-ONLY13-NEXT: [[TMP12:%.*]] = load i32, ptr [[A]], align 4 // SIMD-ONLY13-NEXT: [[TMP13:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// SIMD-ONLY13-NEXT: call void @llvm.stackrestore(ptr [[TMP13]]) +// SIMD-ONLY13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP13]]) // SIMD-ONLY13-NEXT: ret i32 [[TMP12]] // // @@ -11534,7 +11534,7 @@ // SIMD-ONLY13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // SIMD-ONLY13-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // SIMD-ONLY13-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// SIMD-ONLY13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// SIMD-ONLY13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // SIMD-ONLY13-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // SIMD-ONLY13-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // SIMD-ONLY13-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 @@ -11561,7 +11561,7 @@ // SIMD-ONLY13-NEXT: [[TMP9:%.*]] = load i32, ptr [[B]], align 4 // SIMD-ONLY13-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], 
[[TMP9]] // SIMD-ONLY13-NEXT: [[TMP10:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// SIMD-ONLY13-NEXT: call void @llvm.stackrestore(ptr [[TMP10]]) +// SIMD-ONLY13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP10]]) // SIMD-ONLY13-NEXT: ret i32 [[ADD9]] // // Index: clang/test/OpenMP/target_has_device_addr_codegen_01.cpp =================================================================== --- clang/test/OpenMP/target_has_device_addr_codegen_01.cpp +++ clang/test/OpenMP/target_has_device_addr_codegen_01.cpp @@ -53,7 +53,7 @@ // CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[A]], align 4 // CHECK-NEXT: [[CONV:%.*]] = fptosi float [[TMP0]] to i32 // CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[CONV]] to i64 -// CHECK-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 // CHECK-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -138,7 +138,7 @@ // CHECK-NEXT: [[CONV1:%.*]] = fptosi float [[TMP41]] to i32 // CHECK-NEXT: store i32 [[CONV1]], ptr [[RETVAL]], align 4 // CHECK-NEXT: [[TMP42:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK-NEXT: call void @llvm.stackrestore(ptr [[TMP42]]) +// CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP42]]) // CHECK-NEXT: [[TMP43:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK-NEXT: ret i32 [[TMP43]] // @@ -361,7 +361,7 @@ // SIMD-ONLY0-NEXT: [[TMP0:%.*]] = load float, ptr [[A]], align 4 // SIMD-ONLY0-NEXT: [[CONV:%.*]] = fptosi float [[TMP0]] to i32 // SIMD-ONLY0-NEXT: [[TMP1:%.*]] = zext i32 [[CONV]] to i64 -// SIMD-ONLY0-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// SIMD-ONLY0-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // SIMD-ONLY0-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // SIMD-ONLY0-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 // SIMD-ONLY0-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 
8 @@ -393,7 +393,7 @@ // SIMD-ONLY0-NEXT: [[CONV6:%.*]] = fptosi float [[TMP12]] to i32 // SIMD-ONLY0-NEXT: store i32 [[CONV6]], ptr [[RETVAL]], align 4 // SIMD-ONLY0-NEXT: [[TMP13:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// SIMD-ONLY0-NEXT: call void @llvm.stackrestore(ptr [[TMP13]]) +// SIMD-ONLY0-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP13]]) // SIMD-ONLY0-NEXT: [[TMP14:%.*]] = load i32, ptr [[RETVAL]], align 4 // SIMD-ONLY0-NEXT: ret i32 [[TMP14]] // Index: clang/test/OpenMP/target_in_reduction_codegen.cpp =================================================================== --- clang/test/OpenMP/target_in_reduction_codegen.cpp +++ clang/test/OpenMP/target_in_reduction_codegen.cpp @@ -65,7 +65,7 @@ // CHECK1: arrayctor.cont: // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP2]], align 16 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 @@ -157,7 +157,7 @@ // CHECK1-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP0]]) // CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK1-NEXT: [[TMP59:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP59]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP59]]) // CHECK1-NEXT: [[ARRAY_BEGIN7:%.*]] = getelementptr inbounds [5 x %struct.S], ptr [[C]], i32 0, i32 0 // CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN7]], i64 5 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] Index: clang/test/OpenMP/target_parallel_codegen.cpp =================================================================== --- clang/test/OpenMP/target_parallel_codegen.cpp +++ 
clang/test/OpenMP/target_parallel_codegen.cpp @@ -328,7 +328,7 @@ // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 @@ -562,7 +562,7 @@ // CHECK1: omp_if.end20: // CHECK1-NEXT: [[TMP115:%.*]] = load i32, ptr [[A]], align 4 // CHECK1-NEXT: [[TMP116:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP116]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP116]]) // CHECK1-NEXT: ret i32 [[TMP115]] // // @@ -920,7 +920,7 @@ // CHECK1-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 @@ -1017,7 +1017,7 @@ // CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[B]], align 4 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP46]] // CHECK1-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP47]]) // CHECK1-NEXT: ret i32 [[ADD3]] // // @@ -1430,7 +1430,7 @@ // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = 
call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 @@ -1665,7 +1665,7 @@ // CHECK3: omp_if.end20: // CHECK3-NEXT: [[TMP115:%.*]] = load i32, ptr [[A]], align 4 // CHECK3-NEXT: [[TMP116:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP116]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP116]]) // CHECK3-NEXT: ret i32 [[TMP115]] // // @@ -2022,7 +2022,7 @@ // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 @@ -2120,7 +2120,7 @@ // CHECK3-NEXT: [[TMP46:%.*]] = load i32, ptr [[B]], align 4 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP46]] // CHECK3-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP47]]) // CHECK3-NEXT: ret i32 [[ADD3]] // // Index: clang/test/OpenMP/target_parallel_for_codegen.cpp =================================================================== --- clang/test/OpenMP/target_parallel_for_codegen.cpp +++ clang/test/OpenMP/target_parallel_for_codegen.cpp @@ -356,7 +356,7 @@ // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() 
+// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 @@ -647,7 +647,7 @@ // CHECK1: omp_if.end21: // CHECK1-NEXT: [[TMP147:%.*]] = load i32, ptr [[A]], align 4 // CHECK1-NEXT: [[TMP148:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP148]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP148]]) // CHECK1-NEXT: ret i32 [[TMP147]] // // @@ -1423,7 +1423,7 @@ // CHECK1-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 @@ -1520,7 +1520,7 @@ // CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[B]], align 4 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP46]] // CHECK1-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP47]]) // CHECK1-NEXT: ret i32 [[ADD3]] // // @@ -2030,7 +2030,7 @@ // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 @@ -2319,7 
+2319,7 @@ // CHECK3: omp_if.end21: // CHECK3-NEXT: [[TMP145:%.*]] = load i32, ptr [[A]], align 4 // CHECK3-NEXT: [[TMP146:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP146]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP146]]) // CHECK3-NEXT: ret i32 [[TMP145]] // // @@ -3092,7 +3092,7 @@ // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 @@ -3190,7 +3190,7 @@ // CHECK3-NEXT: [[TMP46:%.*]] = load i32, ptr [[B]], align 4 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP46]] // CHECK3-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP47]]) // CHECK3-NEXT: ret i32 [[ADD3]] // // @@ -5232,7 +5232,7 @@ // CHECK17-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK17-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK17-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK17-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK17-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK17-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 // CHECK17-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 @@ -5523,7 +5523,7 @@ // CHECK17: omp_if.end21: // CHECK17-NEXT: [[TMP147:%.*]] = load i32, ptr [[A]], align 4 // CHECK17-NEXT: [[TMP148:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(ptr [[TMP148]]) +// 
CHECK17-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP148]]) // CHECK17-NEXT: ret i32 [[TMP147]] // // @@ -6299,7 +6299,7 @@ // CHECK17-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK17-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK17-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK17-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK17-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK17-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK17-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 @@ -6396,7 +6396,7 @@ // CHECK17-NEXT: [[TMP46:%.*]] = load i32, ptr [[B]], align 4 // CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP46]] // CHECK17-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) +// CHECK17-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP47]]) // CHECK17-NEXT: ret i32 [[ADD3]] // // @@ -6906,7 +6906,7 @@ // CHECK19-NEXT: store i32 0, ptr [[A]], align 4 // CHECK19-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK19-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK19-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK19-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 // CHECK19-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 @@ -7195,7 +7195,7 @@ // CHECK19: omp_if.end21: // CHECK19-NEXT: [[TMP145:%.*]] = load i32, ptr [[A]], align 4 // CHECK19-NEXT: [[TMP146:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(ptr [[TMP146]]) +// CHECK19-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP146]]) // CHECK19-NEXT: ret i32 [[TMP145]] // // @@ -7968,7 +7968,7 @@ // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK19-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // 
CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK19-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK19-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK19-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 @@ -8066,7 +8066,7 @@ // CHECK19-NEXT: [[TMP46:%.*]] = load i32, ptr [[B]], align 4 // CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP46]] // CHECK19-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) +// CHECK19-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP47]]) // CHECK19-NEXT: ret i32 [[ADD3]] // // Index: clang/test/OpenMP/target_parallel_for_reduction_task_codegen.cpp =================================================================== --- clang/test/OpenMP/target_parallel_for_reduction_task_codegen.cpp +++ clang/test/OpenMP/target_parallel_for_reduction_task_codegen.cpp @@ -112,7 +112,7 @@ // CHECK1-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP11:%.*]] = add nuw i64 [[TMP10]], 1 // CHECK1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP13:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP13:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP13]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP11]], align 16 // CHECK1-NEXT: store i64 [[TMP11]], ptr [[__VLA_EXPR0]], align 8 @@ -329,7 +329,7 @@ // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: [[TMP103:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP103]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP103]]) // CHECK1-NEXT: ret void // 
// Index: clang/test/OpenMP/target_parallel_for_simd_codegen.cpp =================================================================== --- clang/test/OpenMP/target_parallel_for_simd_codegen.cpp +++ clang/test/OpenMP/target_parallel_for_simd_codegen.cpp @@ -343,7 +343,7 @@ // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 @@ -612,7 +612,7 @@ // CHECK1: omp_if.end21: // CHECK1-NEXT: [[TMP133:%.*]] = load i32, ptr [[A]], align 4 // CHECK1-NEXT: [[TMP134:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP134]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP134]]) // CHECK1-NEXT: ret i32 [[TMP133]] // // @@ -1351,7 +1351,7 @@ // CHECK1-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 @@ -1448,7 +1448,7 @@ // CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[B]], align 4 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP46]] // CHECK1-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP47]]) // CHECK1-NEXT: ret i32 [[ADD3]] // // @@ -1972,7 
+1972,7 @@ // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 @@ -2239,7 +2239,7 @@ // CHECK3: omp_if.end21: // CHECK3-NEXT: [[TMP131:%.*]] = load i32, ptr [[A]], align 4 // CHECK3-NEXT: [[TMP132:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP132]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP132]]) // CHECK3-NEXT: ret i32 [[TMP131]] // // @@ -2975,7 +2975,7 @@ // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 @@ -3073,7 +3073,7 @@ // CHECK3-NEXT: [[TMP46:%.*]] = load i32, ptr [[B]], align 4 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP46]] // CHECK3-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP47]]) // CHECK3-NEXT: ret i32 [[ADD3]] // // @@ -3599,7 +3599,7 @@ // CHECK5-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK5-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK5-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK5-NEXT: 
[[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK5-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK5-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 // CHECK5-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 @@ -3868,7 +3868,7 @@ // CHECK5: omp_if.end21: // CHECK5-NEXT: [[TMP133:%.*]] = load i32, ptr [[A]], align 4 // CHECK5-NEXT: [[TMP134:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK5-NEXT: call void @llvm.stackrestore(ptr [[TMP134]]) +// CHECK5-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP134]]) // CHECK5-NEXT: ret i32 [[TMP133]] // // @@ -4609,7 +4609,7 @@ // CHECK5-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK5-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK5-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK5-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK5-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK5-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK5-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 @@ -4725,7 +4725,7 @@ // CHECK5-NEXT: [[TMP55:%.*]] = load i32, ptr [[B]], align 4 // CHECK5-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV]], [[TMP55]] // CHECK5-NEXT: [[TMP56:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK5-NEXT: call void @llvm.stackrestore(ptr [[TMP56]]) +// CHECK5-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP56]]) // CHECK5-NEXT: ret i32 [[ADD6]] // // @@ -5335,7 +5335,7 @@ // CHECK7-NEXT: store i32 0, ptr [[A]], align 4 // CHECK7-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK7-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK7-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK7-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 // CHECK7-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 @@ -5602,7 +5602,7 @@ // 
CHECK7: omp_if.end21: // CHECK7-NEXT: [[TMP131:%.*]] = load i32, ptr [[A]], align 4 // CHECK7-NEXT: [[TMP132:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK7-NEXT: call void @llvm.stackrestore(ptr [[TMP132]]) +// CHECK7-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP132]]) // CHECK7-NEXT: ret i32 [[TMP131]] // // @@ -6340,7 +6340,7 @@ // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK7-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK7-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK7-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK7-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK7-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 @@ -6457,7 +6457,7 @@ // CHECK7-NEXT: [[TMP55:%.*]] = load i32, ptr [[B]], align 4 // CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV]], [[TMP55]] // CHECK7-NEXT: [[TMP56:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK7-NEXT: call void @llvm.stackrestore(ptr [[TMP56]]) +// CHECK7-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP56]]) // CHECK7-NEXT: ret i32 [[ADD6]] // // @@ -7077,7 +7077,7 @@ // CHECK9-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -7308,7 +7308,7 @@ // CHECK9-NEXT: store i8 96, ptr [[IT72]], align 1 // CHECK9-NEXT: [[TMP58:%.*]] = load i32, ptr [[A]], align 4 // CHECK9-NEXT: [[TMP59:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP59]]) +// CHECK9-NEXT: 
call void @llvm.stackrestore.p0(ptr [[TMP59]]) // CHECK9-NEXT: ret i32 [[TMP58]] // // @@ -7365,7 +7365,7 @@ // CHECK9-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK9-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK9-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK9-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 @@ -7417,7 +7417,7 @@ // CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[B]], align 4 // CHECK9-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP15]] // CHECK9-NEXT: [[TMP16:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP16]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP16]]) // CHECK9-NEXT: ret i32 [[ADD10]] // // @@ -7555,7 +7555,7 @@ // CHECK11-NEXT: store i32 0, ptr [[A]], align 4 // CHECK11-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -7785,7 +7785,7 @@ // CHECK11-NEXT: store i8 96, ptr [[IT72]], align 1 // CHECK11-NEXT: [[TMP56:%.*]] = load i32, ptr [[A]], align 4 // CHECK11-NEXT: [[TMP57:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP57]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP57]]) // CHECK11-NEXT: ret i32 [[TMP56]] // // @@ -7841,7 +7841,7 @@ // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK11-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // 
CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK11-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 @@ -7893,7 +7893,7 @@ // CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[B]], align 4 // CHECK11-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP14]] // CHECK11-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP15]]) // CHECK11-NEXT: ret i32 [[ADD10]] // // @@ -8032,7 +8032,7 @@ // CHECK13-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK13-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -8263,7 +8263,7 @@ // CHECK13-NEXT: store i8 96, ptr [[IT72]], align 1 // CHECK13-NEXT: [[TMP58:%.*]] = load i32, ptr [[A]], align 4 // CHECK13-NEXT: [[TMP59:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP59]]) +// CHECK13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP59]]) // CHECK13-NEXT: ret i32 [[TMP58]] // // @@ -8321,7 +8321,7 @@ // CHECK13-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK13-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK13-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK13-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK13-NEXT: 
store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK13-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 @@ -8419,7 +8419,7 @@ // CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[B]], align 4 // CHECK13-NEXT: [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP24]] // CHECK13-NEXT: [[TMP25:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP25]]) +// CHECK13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP25]]) // CHECK13-NEXT: ret i32 [[ADD28]] // // @@ -8557,7 +8557,7 @@ // CHECK15-NEXT: store i32 0, ptr [[A]], align 4 // CHECK15-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK15-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4 // CHECK15-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -8787,7 +8787,7 @@ // CHECK15-NEXT: store i8 96, ptr [[IT72]], align 1 // CHECK15-NEXT: [[TMP56:%.*]] = load i32, ptr [[A]], align 4 // CHECK15-NEXT: [[TMP57:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP57]]) +// CHECK15-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP57]]) // CHECK15-NEXT: ret i32 [[TMP56]] // // @@ -8844,7 +8844,7 @@ // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK15-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK15-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK15-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK15-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK15-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 @@ -8942,7 +8942,7 @@ // CHECK15-NEXT: 
[[TMP23:%.*]] = load i32, ptr [[B]], align 4 // CHECK15-NEXT: [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP23]] // CHECK15-NEXT: [[TMP24:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP24]]) +// CHECK15-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP24]]) // CHECK15-NEXT: ret i32 [[ADD28]] // // Index: clang/test/OpenMP/target_parallel_reduction_task_codegen.cpp =================================================================== --- clang/test/OpenMP/target_parallel_reduction_task_codegen.cpp +++ clang/test/OpenMP/target_parallel_reduction_task_codegen.cpp @@ -101,7 +101,7 @@ // CHECK1-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP11:%.*]] = add nuw i64 [[TMP10]], 1 // CHECK1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP13:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP13:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP13]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP11]], align 16 // CHECK1-NEXT: store i64 [[TMP11]], ptr [[__VLA_EXPR0]], align 8 @@ -275,7 +275,7 @@ // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: [[TMP92:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP92]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP92]]) // CHECK1-NEXT: ret void // // Index: clang/test/OpenMP/target_private_codegen.cpp =================================================================== --- clang/test/OpenMP/target_private_codegen.cpp +++ clang/test/OpenMP/target_private_codegen.cpp @@ -100,7 +100,7 @@ // TCHECK: [[VLA_ADDR_REF:%.+]] = load i{{[0-9]+}}, ptr [[VLA_ADDR]], // TCHECK: [[VLA_ADDR_REF2:%.+]] = load i{{[0-9]+}}, ptr [[VLA_ADDR2]], // TCHECK: [[VLA_ADDR_REF4:%.+]] = load 
i{{[0-9]+}}, ptr [[VLA_ADDR4]], - // TCHECK: [[RET_STACK:%.+]] = call ptr @llvm.stacksave() + // TCHECK: [[RET_STACK:%.+]] = call ptr @llvm.stacksave.p0() // TCHECK: store ptr [[RET_STACK]], ptr [[SSTACK]], // TCHECK: [[VLA5:%.+]] = alloca float, i{{[0-9]+}} [[VLA_ADDR_REF]], // TCHECK: [[VLA6_SIZE:%.+]] = mul{{.+}} i{{[0-9]+}} [[VLA_ADDR_REF2]], [[VLA_ADDR_REF4]] @@ -138,7 +138,7 @@ // finish // [[RELOAD_SSTACK:%.+]] = load ptr, ptr [[SSTACK]], - // call ovid @llvm.stackrestore(ptr [[RELOAD_SSTACK]]) + // call void @llvm.stackrestore.p0(ptr [[RELOAD_SSTACK]]) // ret void return a; @@ -219,7 +219,7 @@ // TCHECK: [[TH_ADDR_REF:%.+]] = load ptr, ptr [[TH_ADDR]], // TCHECK: [[VLA_ADDR_REF:%.+]] = load i{{[0-9]+}}, ptr [[VLA_ADDR]], // TCHECK: [[VLA_ADDR_REF2:%.+]] = load i{{[0-9]+}}, ptr [[VLA_ADDR2]], - // TCHECK: [[RET_STACK:%.+]] = call ptr @llvm.stacksave() + // TCHECK: [[RET_STACK:%.+]] = call ptr @llvm.stacksave.p0() // TCHECK: store ptr [[RET_STACK:%.+]], ptr [[SSTACK]], // this->a = (double)b + 1.5; @@ -244,7 +244,7 @@ // finish // TCHECK: [[RELOAD_SSTACK:%.+]] = load ptr, ptr [[SSTACK]], - // TCHECK: call void @llvm.stackrestore(ptr [[RELOAD_SSTACK]]) + // TCHECK: call void @llvm.stackrestore.p0(ptr [[RELOAD_SSTACK]]) // TCHECK: ret void }; Index: clang/test/OpenMP/target_simd_codegen.cpp =================================================================== --- clang/test/OpenMP/target_simd_codegen.cpp +++ clang/test/OpenMP/target_simd_codegen.cpp @@ -475,7 +475,7 @@ // // CHECK: define {{.*}}[[FS1]] // -// CHECK: ptr @llvm.stacksave() +// CHECK: ptr @llvm.stacksave.p0() // CHECK-32: store i32 %{{.+}}, ptr %__vla_expr // OMP51: [[IF:%.+]] = icmp sgt i32 {{[^,]+}}, 60 // CHECK-64: store i32 %{{.+}}, ptr [[B_ADDR:%.+]], Index: clang/test/OpenMP/target_teams_codegen.cpp =================================================================== --- clang/test/OpenMP/target_teams_codegen.cpp +++ clang/test/OpenMP/target_teams_codegen.cpp @@ -358,7 +358,7 @@ // CHECK1-NEXT:
store i16 0, ptr [[AA]], align 2 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 @@ -736,7 +736,7 @@ // CHECK1: omp_offload.cont39: // CHECK1-NEXT: [[TMP192:%.*]] = load i32, ptr [[A]], align 4 // CHECK1-NEXT: [[TMP193:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP193]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP193]]) // CHECK1-NEXT: ret i32 [[TMP192]] // // @@ -1322,7 +1322,7 @@ // CHECK1-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 @@ -1419,7 +1419,7 @@ // CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[B]], align 4 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP46]] // CHECK1-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP47]]) // CHECK1-NEXT: ret i32 [[ADD3]] // // @@ -1851,7 +1851,7 @@ // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP2:%.*]] = call ptr 
@llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 @@ -2230,7 +2230,7 @@ // CHECK3: omp_offload.cont39: // CHECK3-NEXT: [[TMP192:%.*]] = load i32, ptr [[A]], align 4 // CHECK3-NEXT: [[TMP193:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP193]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP193]]) // CHECK3-NEXT: ret i32 [[TMP192]] // // @@ -2814,7 +2814,7 @@ // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 @@ -2912,7 +2912,7 @@ // CHECK3-NEXT: [[TMP46:%.*]] = load i32, ptr [[B]], align 4 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP46]] // CHECK3-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP47]]) // CHECK3-NEXT: ret i32 [[ADD3]] // // Index: clang/test/OpenMP/target_teams_distribute_codegen.cpp =================================================================== --- clang/test/OpenMP/target_teams_distribute_codegen.cpp +++ clang/test/OpenMP/target_teams_distribute_codegen.cpp @@ -344,7 +344,7 @@ // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr 
@llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 @@ -644,7 +644,7 @@ // CHECK1: omp_if.end30: // CHECK1-NEXT: [[TMP154:%.*]] = load i32, ptr [[A]], align 4 // CHECK1-NEXT: [[TMP155:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP155]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP155]]) // CHECK1-NEXT: ret i32 [[TMP154]] // // @@ -1345,7 +1345,7 @@ // CHECK1-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 @@ -1442,7 +1442,7 @@ // CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[B]], align 4 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP46]] // CHECK1-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP47]]) // CHECK1-NEXT: ret i32 [[ADD3]] // // @@ -2088,7 +2088,7 @@ // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 @@ -2389,7 +2389,7 @@ // CHECK3: omp_if.end30: // 
CHECK3-NEXT: [[TMP154:%.*]] = load i32, ptr [[A]], align 4 // CHECK3-NEXT: [[TMP155:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP155]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP155]]) // CHECK3-NEXT: ret i32 [[TMP154]] // // @@ -3089,7 +3089,7 @@ // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 @@ -3187,7 +3187,7 @@ // CHECK3-NEXT: [[TMP46:%.*]] = load i32, ptr [[B]], align 4 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP46]] // CHECK3-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP47]]) // CHECK3-NEXT: ret i32 [[ADD3]] // // Index: clang/test/OpenMP/target_teams_distribute_collapse_codegen.cpp =================================================================== --- clang/test/OpenMP/target_teams_distribute_collapse_codegen.cpp +++ clang/test/OpenMP/target_teams_distribute_collapse_codegen.cpp @@ -460,7 +460,7 @@ // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4 // CHECK9-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 -// CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], [[TMP3]] // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 @@ -564,7 +564,7 @@ // CHECK9-NEXT: 
[[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP51]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[TMP52:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP52]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP52]]) // CHECK9-NEXT: [[TMP53:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK9-NEXT: ret i32 [[TMP53]] // @@ -927,7 +927,7 @@ // CHECK11-NEXT: store i32 2, ptr [[M]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 @@ -1032,7 +1032,7 @@ // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP50]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[TMP51:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP51]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP51]]) // CHECK11-NEXT: [[TMP52:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK11-NEXT: ret i32 [[TMP52]] // Index: clang/test/OpenMP/target_teams_distribute_dist_schedule_codegen.cpp =================================================================== --- clang/test/OpenMP/target_teams_distribute_dist_schedule_codegen.cpp +++ clang/test/OpenMP/target_teams_distribute_dist_schedule_codegen.cpp @@ -1001,7 +1001,7 @@ // CHECK9-NEXT: store i32 100, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: 
[[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -1233,7 +1233,7 @@ // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP114]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[TMP115:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP115]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP115]]) // CHECK9-NEXT: [[TMP116:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK9-NEXT: ret i32 [[TMP116]] // @@ -2048,7 +2048,7 @@ // CHECK11-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK11-NEXT: store i32 100, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -2283,7 +2283,7 @@ // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP116]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[TMP117:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP117]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP117]]) // CHECK11-NEXT: [[TMP118:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK11-NEXT: ret i32 [[TMP118]] // Index: clang/test/OpenMP/target_teams_distribute_parallel_for_collapse_codegen.cpp =================================================================== --- clang/test/OpenMP/target_teams_distribute_parallel_for_collapse_codegen.cpp +++ 
clang/test/OpenMP/target_teams_distribute_parallel_for_collapse_codegen.cpp @@ -611,7 +611,7 @@ // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4 // CHECK9-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 -// CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], [[TMP3]] // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 @@ -715,7 +715,7 @@ // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP51]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[TMP52:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP52]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP52]]) // CHECK9-NEXT: [[TMP53:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK9-NEXT: ret i32 [[TMP53]] // @@ -1279,7 +1279,7 @@ // CHECK11-NEXT: store i32 2, ptr [[M]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 @@ -1384,7 +1384,7 @@ // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP50]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[TMP51:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP51]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP51]]) // CHECK11-NEXT: [[TMP52:%.*]] = load 
i32, ptr [[RETVAL]], align 4 // CHECK11-NEXT: ret i32 [[TMP52]] // Index: clang/test/OpenMP/target_teams_distribute_parallel_for_dist_schedule_codegen.cpp =================================================================== --- clang/test/OpenMP/target_teams_distribute_parallel_for_dist_schedule_codegen.cpp +++ clang/test/OpenMP/target_teams_distribute_parallel_for_dist_schedule_codegen.cpp @@ -1446,7 +1446,7 @@ // CHECK9-NEXT: store i32 100, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -1679,7 +1679,7 @@ // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP114]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[TMP115:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP115]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP115]]) // CHECK9-NEXT: [[TMP116:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK9-NEXT: ret i32 [[TMP116]] // @@ -3072,7 +3072,7 @@ // CHECK11-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK11-NEXT: store i32 100, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -3308,7 +3308,7 @@ // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 
@_Z5tmainIiLi10EEiT_(i32 noundef [[TMP116]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[TMP117:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP117]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP117]]) // CHECK11-NEXT: [[TMP118:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK11-NEXT: ret i32 [[TMP118]] // Index: clang/test/OpenMP/target_teams_distribute_parallel_for_reduction_task_codegen.cpp =================================================================== --- clang/test/OpenMP/target_teams_distribute_parallel_for_reduction_task_codegen.cpp +++ clang/test/OpenMP/target_teams_distribute_parallel_for_reduction_task_codegen.cpp @@ -107,7 +107,7 @@ // CHECK1-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP11:%.*]] = add nuw i64 [[TMP10]], 1 // CHECK1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP13:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP13:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP13]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP11]], align 16 // CHECK1-NEXT: store i64 [[TMP11]], ptr [[__VLA_EXPR0]], align 8 @@ -304,7 +304,7 @@ // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: [[TMP90:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP90]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP90]]) // CHECK1-NEXT: ret void // // @@ -451,7 +451,7 @@ // CHECK1-NEXT: [[TMP12:%.*]] = sdiv exact i64 [[TMP11]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP13:%.*]] = add nuw i64 [[TMP12]], 1 // CHECK1-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], ptrtoint (ptr getelementptr (i8, ptr null, i32 
1) to i64) -// CHECK1-NEXT: [[TMP15:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP15:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP15]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP13]], align 16 // CHECK1-NEXT: store i64 [[TMP13]], ptr [[__VLA_EXPR0]], align 8 @@ -668,7 +668,7 @@ // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: [[TMP105:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP105]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP105]]) // CHECK1-NEXT: ret void // // Index: clang/test/OpenMP/target_teams_distribute_parallel_for_schedule_codegen.cpp =================================================================== --- clang/test/OpenMP/target_teams_distribute_parallel_for_schedule_codegen.cpp +++ clang/test/OpenMP/target_teams_distribute_parallel_for_schedule_codegen.cpp @@ -4341,7 +4341,7 @@ // CHECK13-NEXT: store i32 100, ptr [[N]], align 4 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK13-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -4727,7 +4727,7 @@ // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP190]]) // CHECK13-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK13-NEXT: [[TMP191:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP191]]) +// CHECK13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP191]]) // CHECK13-NEXT: [[TMP192:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK13-NEXT: ret i32 [[TMP192]] // @@ 
-6998,7 +6998,7 @@ // CHECK15-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK15-NEXT: store i32 100, ptr [[N]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK15-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK15-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -7389,7 +7389,7 @@ // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP194]]) // CHECK15-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK15-NEXT: [[TMP195:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP195]]) +// CHECK15-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP195]]) // CHECK15-NEXT: [[TMP196:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK15-NEXT: ret i32 [[TMP196]] // @@ -9609,7 +9609,7 @@ // CHECK17-NEXT: store i32 100, ptr [[N]], align 4 // CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK17-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK17-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK17-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK17-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK17-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK17-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -9995,7 +9995,7 @@ // CHECK17-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP190]]) // CHECK17-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK17-NEXT: [[TMP191:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(ptr [[TMP191]]) +// CHECK17-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP191]]) // CHECK17-NEXT: [[TMP192:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK17-NEXT: ret i32 
[[TMP192]] // @@ -12266,7 +12266,7 @@ // CHECK19-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK19-NEXT: store i32 100, ptr [[N]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK19-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK19-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK19-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -12657,7 +12657,7 @@ // CHECK19-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP194]]) // CHECK19-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK19-NEXT: [[TMP195:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(ptr [[TMP195]]) +// CHECK19-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP195]]) // CHECK19-NEXT: [[TMP196:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK19-NEXT: ret i32 [[TMP196]] // Index: clang/test/OpenMP/target_teams_distribute_parallel_for_simd_collapse_codegen.cpp =================================================================== --- clang/test/OpenMP/target_teams_distribute_parallel_for_simd_collapse_codegen.cpp +++ clang/test/OpenMP/target_teams_distribute_parallel_for_simd_collapse_codegen.cpp @@ -783,7 +783,7 @@ // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4 // CHECK9-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 -// CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], [[TMP3]] // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 @@ -887,7 +887,7 @@ // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP51]]) // CHECK9-NEXT: 
store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[TMP52:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP52]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP52]]) // CHECK9-NEXT: [[TMP53:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK9-NEXT: ret i32 [[TMP53]] // @@ -1503,7 +1503,7 @@ // CHECK11-NEXT: store i32 2, ptr [[M]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 @@ -1608,7 +1608,7 @@ // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP50]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[TMP51:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP51]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP51]]) // CHECK11-NEXT: [[TMP52:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK11-NEXT: ret i32 [[TMP52]] // @@ -2222,7 +2222,7 @@ // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4 // CHECK13-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 -// CHECK13-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// CHECK13-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // CHECK13-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], [[TMP3]] // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 @@ -2330,7 +2330,7 @@ // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP28]]) // 
CHECK13-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK13-NEXT: [[TMP29:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP29]]) +// CHECK13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP29]]) // CHECK13-NEXT: [[TMP30:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK13-NEXT: ret i32 [[TMP30]] // @@ -2423,7 +2423,7 @@ // CHECK15-NEXT: store i32 2, ptr [[M]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4 -// CHECK15-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK15-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK15-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK15-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 @@ -2529,7 +2529,7 @@ // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP26]]) // CHECK15-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK15-NEXT: [[TMP27:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP27]]) +// CHECK15-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP27]]) // CHECK15-NEXT: [[TMP28:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK15-NEXT: ret i32 [[TMP28]] // Index: clang/test/OpenMP/target_teams_distribute_parallel_for_simd_dist_schedule_codegen.cpp =================================================================== --- clang/test/OpenMP/target_teams_distribute_parallel_for_simd_dist_schedule_codegen.cpp +++ clang/test/OpenMP/target_teams_distribute_parallel_for_simd_dist_schedule_codegen.cpp @@ -1777,7 +1777,7 @@ // CHECK9-NEXT: store i32 100, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP2:%.*]] = call ptr 
@llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -2010,7 +2010,7 @@ // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP114]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[TMP115:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP115]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP115]]) // CHECK9-NEXT: [[TMP116:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK9-NEXT: ret i32 [[TMP116]] // @@ -3517,7 +3517,7 @@ // CHECK11-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK11-NEXT: store i32 100, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -3753,7 +3753,7 @@ // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP116]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[TMP117:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP117]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP117]]) // CHECK11-NEXT: [[TMP118:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK11-NEXT: ret i32 [[TMP118]] // @@ -5227,7 +5227,7 @@ // CHECK13-NEXT: store i32 100, ptr [[N]], align 4 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK13-NEXT: [[TMP2:%.*]] = 
call ptr @llvm.stacksave.p0() // CHECK13-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -5385,7 +5385,7 @@ // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP37]]) // CHECK13-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK13-NEXT: [[TMP38:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP38]]) +// CHECK13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP38]]) // CHECK13-NEXT: [[TMP39:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK13-NEXT: ret i32 [[TMP39]] // @@ -5546,7 +5546,7 @@ // CHECK15-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK15-NEXT: store i32 100, ptr [[N]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK15-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK15-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -5701,7 +5701,7 @@ // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP36]]) // CHECK15-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK15-NEXT: [[TMP37:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP37]]) +// CHECK15-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP37]]) // CHECK15-NEXT: [[TMP38:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK15-NEXT: ret i32 [[TMP38]] // Index: clang/test/OpenMP/target_teams_distribute_parallel_for_simd_schedule_codegen.cpp =================================================================== --- clang/test/OpenMP/target_teams_distribute_parallel_for_simd_schedule_codegen.cpp +++ 
clang/test/OpenMP/target_teams_distribute_parallel_for_simd_schedule_codegen.cpp @@ -5006,7 +5006,7 @@ // CHECK13-NEXT: store i32 100, ptr [[N]], align 4 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK13-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -5392,7 +5392,7 @@ // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP190]]) // CHECK13-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK13-NEXT: [[TMP191:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP191]]) +// CHECK13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP191]]) // CHECK13-NEXT: [[TMP192:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK13-NEXT: ret i32 [[TMP192]] // @@ -7853,7 +7853,7 @@ // CHECK15-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK15-NEXT: store i32 100, ptr [[N]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK15-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK15-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -8244,7 +8244,7 @@ // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP194]]) // CHECK15-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK15-NEXT: [[TMP195:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP195]]) +// CHECK15-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP195]]) // 
CHECK15-NEXT: [[TMP196:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK15-NEXT: ret i32 [[TMP196]] // @@ -10654,7 +10654,7 @@ // CHECK17-NEXT: store i32 100, ptr [[N]], align 4 // CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK17-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK17-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK17-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK17-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK17-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK17-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -11040,7 +11040,7 @@ // CHECK17-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP190]]) // CHECK17-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK17-NEXT: [[TMP191:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(ptr [[TMP191]]) +// CHECK17-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP191]]) // CHECK17-NEXT: [[TMP192:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK17-NEXT: ret i32 [[TMP192]] // @@ -13501,7 +13501,7 @@ // CHECK19-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK19-NEXT: store i32 100, ptr [[N]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK19-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK19-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK19-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -13892,7 +13892,7 @@ // CHECK19-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP194]]) // CHECK19-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK19-NEXT: [[TMP195:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(ptr [[TMP195]]) +// CHECK19-NEXT: call void 
@llvm.stackrestore.p0(ptr [[TMP195]]) // CHECK19-NEXT: [[TMP196:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK19-NEXT: ret i32 [[TMP196]] // @@ -16295,7 +16295,7 @@ // CHECK21-NEXT: store i32 100, ptr [[N]], align 4 // CHECK21-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK21-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK21-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK21-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK21-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK21-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK21-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -16553,7 +16553,7 @@ // CHECK21-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP60]]) // CHECK21-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK21-NEXT: [[TMP61:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK21-NEXT: call void @llvm.stackrestore(ptr [[TMP61]]) +// CHECK21-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP61]]) // CHECK21-NEXT: [[TMP62:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK21-NEXT: ret i32 [[TMP62]] // @@ -16802,7 +16802,7 @@ // CHECK23-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK23-NEXT: store i32 100, ptr [[N]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK23-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK23-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK23-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK23-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -17055,7 +17055,7 @@ // CHECK23-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP59]]) // CHECK23-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK23-NEXT: [[TMP60:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK23-NEXT: call void @llvm.stackrestore(ptr [[TMP60]]) +// 
CHECK23-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP60]]) // CHECK23-NEXT: [[TMP61:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK23-NEXT: ret i32 [[TMP61]] // Index: clang/test/OpenMP/target_teams_distribute_simd_codegen.cpp =================================================================== --- clang/test/OpenMP/target_teams_distribute_simd_codegen.cpp +++ clang/test/OpenMP/target_teams_distribute_simd_codegen.cpp @@ -341,7 +341,7 @@ // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 @@ -630,7 +630,7 @@ // CHECK1: omp_if.end28: // CHECK1-NEXT: [[TMP148:%.*]] = load i32, ptr [[A]], align 4 // CHECK1-NEXT: [[TMP149:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP149]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP149]]) // CHECK1-NEXT: ret i32 [[TMP148]] // // @@ -1340,7 +1340,7 @@ // CHECK1-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 @@ -1437,7 +1437,7 @@ // CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[B]], align 4 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP46]] // CHECK1-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 
-// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP47]]) // CHECK1-NEXT: ret i32 [[ADD3]] // // @@ -2111,7 +2111,7 @@ // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 @@ -2401,7 +2401,7 @@ // CHECK3: omp_if.end28: // CHECK3-NEXT: [[TMP148:%.*]] = load i32, ptr [[A]], align 4 // CHECK3-NEXT: [[TMP149:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP149]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP149]]) // CHECK3-NEXT: ret i32 [[TMP148]] // // @@ -3110,7 +3110,7 @@ // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 @@ -3208,7 +3208,7 @@ // CHECK3-NEXT: [[TMP46:%.*]] = load i32, ptr [[B]], align 4 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP46]] // CHECK3-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP47]]) // CHECK3-NEXT: ret i32 [[ADD3]] // // @@ -3883,7 +3883,7 @@ // CHECK5-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK5-NEXT: [[TMP1:%.*]] = 
load i32, ptr [[N_ADDR]], align 4 // CHECK5-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK5-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK5-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK5-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK5-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 // CHECK5-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 @@ -4172,7 +4172,7 @@ // CHECK5: omp_if.end28: // CHECK5-NEXT: [[TMP148:%.*]] = load i32, ptr [[A]], align 4 // CHECK5-NEXT: [[TMP149:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK5-NEXT: call void @llvm.stackrestore(ptr [[TMP149]]) +// CHECK5-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP149]]) // CHECK5-NEXT: ret i32 [[TMP148]] // // @@ -4884,7 +4884,7 @@ // CHECK5-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK5-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK5-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK5-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK5-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK5-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK5-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 @@ -4996,7 +4996,7 @@ // CHECK5-NEXT: [[TMP52:%.*]] = load i32, ptr [[B]], align 4 // CHECK5-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV]], [[TMP52]] // CHECK5-NEXT: [[TMP53:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK5-NEXT: call void @llvm.stackrestore(ptr [[TMP53]]) +// CHECK5-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP53]]) // CHECK5-NEXT: ret i32 [[ADD5]] // // @@ -5722,7 +5722,7 @@ // CHECK7-NEXT: store i32 0, ptr [[A]], align 4 // CHECK7-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK7-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK7-NEXT: store ptr [[TMP2]], ptr 
[[SAVED_STACK]], align 4 // CHECK7-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 // CHECK7-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 @@ -6012,7 +6012,7 @@ // CHECK7: omp_if.end28: // CHECK7-NEXT: [[TMP148:%.*]] = load i32, ptr [[A]], align 4 // CHECK7-NEXT: [[TMP149:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK7-NEXT: call void @llvm.stackrestore(ptr [[TMP149]]) +// CHECK7-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP149]]) // CHECK7-NEXT: ret i32 [[TMP148]] // // @@ -6723,7 +6723,7 @@ // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK7-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK7-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK7-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK7-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK7-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 @@ -6836,7 +6836,7 @@ // CHECK7-NEXT: [[TMP52:%.*]] = load i32, ptr [[B]], align 4 // CHECK7-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV]], [[TMP52]] // CHECK7-NEXT: [[TMP53:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK7-NEXT: call void @llvm.stackrestore(ptr [[TMP53]]) +// CHECK7-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP53]]) // CHECK7-NEXT: ret i32 [[ADD5]] // // @@ -7561,7 +7561,7 @@ // CHECK9-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -7757,7 +7757,7 @@ // CHECK9-NEXT: store i32 10, ptr [[I58]], align 4 // CHECK9-NEXT: [[TMP46:%.*]] = 
load i32, ptr [[A]], align 4 // CHECK9-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP47]]) // CHECK9-NEXT: ret i32 [[TMP46]] // // @@ -7814,7 +7814,7 @@ // CHECK9-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK9-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK9-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK9-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 @@ -7866,7 +7866,7 @@ // CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[B]], align 4 // CHECK9-NEXT: [[ADD11:%.*]] = add nsw i32 [[CONV10]], [[TMP15]] // CHECK9-NEXT: [[TMP16:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP16]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP16]]) // CHECK9-NEXT: ret i32 [[ADD11]] // // @@ -8071,7 +8071,7 @@ // CHECK11-NEXT: store i32 0, ptr [[A]], align 4 // CHECK11-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -8266,7 +8266,7 @@ // CHECK11-NEXT: store i32 10, ptr [[I58]], align 4 // CHECK11-NEXT: [[TMP44:%.*]] = load i32, ptr [[A]], align 4 // CHECK11-NEXT: [[TMP45:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP45]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP45]]) // 
CHECK11-NEXT: ret i32 [[TMP44]] // // @@ -8322,7 +8322,7 @@ // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK11-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK11-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 @@ -8374,7 +8374,7 @@ // CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[B]], align 4 // CHECK11-NEXT: [[ADD11:%.*]] = add nsw i32 [[CONV10]], [[TMP14]] // CHECK11-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP15]]) // CHECK11-NEXT: ret i32 [[ADD11]] // // @@ -8580,7 +8580,7 @@ // CHECK13-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK13-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -8776,7 +8776,7 @@ // CHECK13-NEXT: store i32 10, ptr [[I58]], align 4 // CHECK13-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 // CHECK13-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) +// CHECK13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP47]]) // CHECK13-NEXT: ret i32 [[TMP46]] // // @@ -8834,7 +8834,7 @@ // CHECK13-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK13-NEXT: 
[[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK13-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK13-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK13-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK13-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 @@ -8932,7 +8932,7 @@ // CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[B]], align 4 // CHECK13-NEXT: [[ADD29:%.*]] = add nsw i32 [[CONV28]], [[TMP24]] // CHECK13-NEXT: [[TMP25:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP25]]) +// CHECK13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP25]]) // CHECK13-NEXT: ret i32 [[ADD29]] // // @@ -9137,7 +9137,7 @@ // CHECK15-NEXT: store i32 0, ptr [[A]], align 4 // CHECK15-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK15-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4 // CHECK15-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -9332,7 +9332,7 @@ // CHECK15-NEXT: store i32 10, ptr [[I58]], align 4 // CHECK15-NEXT: [[TMP44:%.*]] = load i32, ptr [[A]], align 4 // CHECK15-NEXT: [[TMP45:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP45]]) +// CHECK15-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP45]]) // CHECK15-NEXT: ret i32 [[TMP44]] // // @@ -9389,7 +9389,7 @@ // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK15-NEXT: store i32 [[ADD]], ptr [[B]], align 4 // CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK15-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK15-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK15-NEXT: store ptr [[TMP2]], ptr 
[[SAVED_STACK]], align 4 // CHECK15-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK15-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 @@ -9487,7 +9487,7 @@ // CHECK15-NEXT: [[TMP23:%.*]] = load i32, ptr [[B]], align 4 // CHECK15-NEXT: [[ADD29:%.*]] = add nsw i32 [[CONV28]], [[TMP23]] // CHECK15-NEXT: [[TMP24:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP24]]) +// CHECK15-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP24]]) // CHECK15-NEXT: ret i32 [[ADD29]] // // Index: clang/test/OpenMP/target_teams_distribute_simd_collapse_codegen.cpp =================================================================== --- clang/test/OpenMP/target_teams_distribute_simd_collapse_codegen.cpp +++ clang/test/OpenMP/target_teams_distribute_simd_collapse_codegen.cpp @@ -616,7 +616,7 @@ // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4 // CHECK9-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 -// CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], [[TMP3]] // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 @@ -720,7 +720,7 @@ // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP51]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[TMP52:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP52]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP52]]) // CHECK9-NEXT: [[TMP53:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK9-NEXT: ret i32 [[TMP53]] // @@ -1109,7 +1109,7 @@ // CHECK11-NEXT: store i32 2, ptr [[M]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr 
[[M]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 @@ -1214,7 +1214,7 @@ // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP50]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[TMP51:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP51]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP51]]) // CHECK11-NEXT: [[TMP52:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK11-NEXT: ret i32 [[TMP52]] // @@ -1601,7 +1601,7 @@ // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4 // CHECK13-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 -// CHECK13-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// CHECK13-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // CHECK13-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], [[TMP3]] // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 @@ -1709,7 +1709,7 @@ // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP28]]) // CHECK13-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK13-NEXT: [[TMP29:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP29]]) +// CHECK13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP29]]) // CHECK13-NEXT: [[TMP30:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK13-NEXT: ret i32 [[TMP30]] // @@ -1802,7 +1802,7 @@ // CHECK15-NEXT: store i32 2, ptr [[M]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK15-NEXT: [[TMP1:%.*]] = load 
i32, ptr [[M]], align 4 -// CHECK15-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK15-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK15-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK15-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 @@ -1908,7 +1908,7 @@ // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP26]]) // CHECK15-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK15-NEXT: [[TMP27:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP27]]) +// CHECK15-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP27]]) // CHECK15-NEXT: [[TMP28:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK15-NEXT: ret i32 [[TMP28]] // Index: clang/test/OpenMP/target_teams_distribute_simd_dist_schedule_codegen.cpp =================================================================== --- clang/test/OpenMP/target_teams_distribute_simd_dist_schedule_codegen.cpp +++ clang/test/OpenMP/target_teams_distribute_simd_dist_schedule_codegen.cpp @@ -1290,7 +1290,7 @@ // CHECK9-NEXT: store i32 100, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -1522,7 +1522,7 @@ // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP114]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[TMP115:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP115]]) +// CHECK9-NEXT: call void 
@llvm.stackrestore.p0(ptr [[TMP115]]) // CHECK9-NEXT: [[TMP116:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK9-NEXT: ret i32 [[TMP116]] // @@ -2394,7 +2394,7 @@ // CHECK11-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK11-NEXT: store i32 100, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -2629,7 +2629,7 @@ // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP116]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[TMP117:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP117]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP117]]) // CHECK11-NEXT: [[TMP118:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK11-NEXT: ret i32 [[TMP118]] // @@ -3492,7 +3492,7 @@ // CHECK13-NEXT: store i32 100, ptr [[N]], align 4 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK13-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -3649,7 +3649,7 @@ // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP37]]) // CHECK13-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK13-NEXT: [[TMP38:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP38]]) +// 
CHECK13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP38]]) // CHECK13-NEXT: [[TMP39:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK13-NEXT: ret i32 [[TMP39]] // @@ -3804,7 +3804,7 @@ // CHECK15-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK15-NEXT: store i32 100, ptr [[N]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK15-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK15-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -3958,7 +3958,7 @@ // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP36]]) // CHECK15-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK15-NEXT: [[TMP37:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP37]]) +// CHECK15-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP37]]) // CHECK15-NEXT: [[TMP38:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK15-NEXT: ret i32 [[TMP38]] // Index: clang/test/OpenMP/target_teams_generic_loop_collapse_codegen.cpp =================================================================== --- clang/test/OpenMP/target_teams_generic_loop_collapse_codegen.cpp +++ clang/test/OpenMP/target_teams_generic_loop_collapse_codegen.cpp @@ -611,7 +611,7 @@ // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4 // CHECK9-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 -// CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], [[TMP3]] // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 @@ -715,7 +715,7 @@ // CHECK9-NEXT: [[CALL:%.*]] = 
call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP51]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[TMP52:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP52]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP52]]) // CHECK9-NEXT: [[TMP53:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK9-NEXT: ret i32 [[TMP53]] // @@ -1279,7 +1279,7 @@ // CHECK11-NEXT: store i32 2, ptr [[M]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 @@ -1384,7 +1384,7 @@ // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP50]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[TMP51:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP51]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP51]]) // CHECK11-NEXT: [[TMP52:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK11-NEXT: ret i32 [[TMP52]] // Index: clang/test/OpenMP/task_affinity_codegen.cpp =================================================================== --- clang/test/OpenMP/task_affinity_codegen.cpp +++ clang/test/OpenMP/task_affinity_codegen.cpp @@ -46,7 +46,7 @@ // = + 1 constant affinity for affinity(a) // CHECK: [[NUM_ELEMS:%.+]] = add nuw i64 1, [[CONV]] - // CHECK: [[SV:%.+]] = call ptr @llvm.stacksave() + // CHECK: [[SV:%.+]] = call ptr @llvm.stacksave.p0() // CHECK: store ptr [[SV]], ptr [[SV_ADDR:%.+]], // kmp_task_affinity_info_t affs[]; @@ -121,7 +121,7 @@ // CHECK: [[DONE]]: // 
CHECK: call i32 @__kmpc_omp_reg_task_with_affinity(ptr @{{.+}} i32 [[GTID]], ptr [[TD]], i32 [[NAFFS]], ptr [[AFFS_ADDR]]) // CHECK: [[SV:%.+]] = load ptr, ptr [[SV_ADDR]], - // CHECK: call void @llvm.stackrestore(ptr [[SV]]) + // CHECK: call void @llvm.stackrestore.p0(ptr [[SV]]) #pragma omp task affinity(iterator(i=0:a): p[i]) affinity(a) ; return 0; Index: clang/test/OpenMP/task_codegen.c =================================================================== --- clang/test/OpenMP/task_codegen.c +++ clang/test/OpenMP/task_codegen.c @@ -54,7 +54,7 @@ // CHECK: [[SIZE1:%.+]] = add nuw i64 0, [[SZ]] // CHECK: [[SIZE2:%.+]] = add nuw i64 [[SIZE1]], [[SZ1]] // CHECK: [[SIZE:%.+]] = add nuw i64 [[SIZE2]], 2 - // CHECK: [[SV:%.+]] = call ptr @llvm.stacksave() + // CHECK: [[SV:%.+]] = call ptr @llvm.stacksave.p0() // CHECK: store ptr [[SV]], ptr [[SV_ADDR:%.+]], align 8 // CHECK: [[VLA:%.+]] = alloca %struct.kmp_depend_info, i64 [[SIZE]], // CHECK: [[SIZE32:%.+]] = trunc i64 [[SIZE]] to i32 @@ -103,7 +103,7 @@ // CHECK: store i64 [[ADD]], ptr [[DEP_COUNTER_ADDR]], align 8 // CHECK: call i32 @__kmpc_omp_task_with_deps(ptr @{{.+}}, i32 [[GTID]], ptr [[ALLOC]], i32 [[SIZE32]], ptr [[VLA]], i32 0, ptr null) // CHECK: [[SV:%.+]] = load ptr, ptr [[SV_ADDR]], align 8 - // CHECK: call void @llvm.stackrestore(ptr [[SV]]) + // CHECK: call void @llvm.stackrestore.p0(ptr [[SV]]) #pragma omp task depend(in: a, ([3][a][a])&b) depend(depobj: d, x) detach(evt) { #pragma omp taskgroup Index: clang/test/OpenMP/task_codegen.cpp =================================================================== --- clang/test/OpenMP/task_codegen.cpp +++ clang/test/OpenMP/task_codegen.cpp @@ -258,7 +258,7 @@ // CHECK1: arrayctor.cont: // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr @a, align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr 
[[SAVED_STACK]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 10, [[TMP2]] // CHECK1-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP4]], align 16 @@ -483,7 +483,7 @@ // CHECK1-NEXT: [[TMP150:%.*]] = load i32, ptr @a, align 4 // CHECK1-NEXT: store i32 [[TMP150]], ptr [[RETVAL]], align 4 // CHECK1-NEXT: [[TMP151:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP151]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP151]]) // CHECK1-NEXT: [[ARRAY_BEGIN32:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S]], i32 0, i32 0 // CHECK1-NEXT: [[TMP152:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN32]], i64 2 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] @@ -1381,7 +1381,7 @@ // CHECK1-51: arrayctor.cont: // CHECK1-51-NEXT: [[TMP1:%.*]] = load i32, ptr @a, align 4 // CHECK1-51-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-51-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-51-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-51-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-51-NEXT: [[TMP4:%.*]] = mul nuw i64 10, [[TMP2]] // CHECK1-51-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP4]], align 16 @@ -1651,7 +1651,7 @@ // CHECK1-51-NEXT: [[TMP178:%.*]] = load i32, ptr @a, align 4 // CHECK1-51-NEXT: store i32 [[TMP178]], ptr [[RETVAL]], align 4 // CHECK1-51-NEXT: [[TMP179:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-51-NEXT: call void @llvm.stackrestore(ptr [[TMP179]]) +// CHECK1-51-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP179]]) // CHECK1-51-NEXT: [[ARRAY_BEGIN42:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S]], i32 0, i32 0 // CHECK1-51-NEXT: [[TMP180:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN42]], i64 2 // CHECK1-51-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] @@ -2989,7 +2989,7 @@ // CHECK2: arrayctor.cont: // CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr @a, align 4 // CHECK2-NEXT: [[TMP2:%.*]] = zext i32 
[[TMP1]] to i64 -// CHECK2-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK2-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK2-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK2-NEXT: [[TMP4:%.*]] = mul nuw i64 10, [[TMP2]] // CHECK2-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP4]], align 16 @@ -3214,7 +3214,7 @@ // CHECK2-NEXT: [[TMP150:%.*]] = load i32, ptr @a, align 4 // CHECK2-NEXT: store i32 [[TMP150]], ptr [[RETVAL]], align 4 // CHECK2-NEXT: [[TMP151:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK2-NEXT: call void @llvm.stackrestore(ptr [[TMP151]]) +// CHECK2-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP151]]) // CHECK2-NEXT: [[ARRAY_BEGIN32:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S]], i32 0, i32 0 // CHECK2-NEXT: [[TMP152:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN32]], i64 2 // CHECK2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] @@ -4112,7 +4112,7 @@ // CHECK2-51: arrayctor.cont: // CHECK2-51-NEXT: [[TMP1:%.*]] = load i32, ptr @a, align 4 // CHECK2-51-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK2-51-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK2-51-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK2-51-NEXT: [[TMP4:%.*]] = mul nuw i64 10, [[TMP2]] // CHECK2-51-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP4]], align 16 @@ -4382,7 +4382,7 @@ // CHECK2-51-NEXT: [[TMP178:%.*]] = load i32, ptr @a, align 4 // CHECK2-51-NEXT: store i32 [[TMP178]], ptr [[RETVAL]], align 4 // CHECK2-51-NEXT: [[TMP179:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK2-51-NEXT: call void @llvm.stackrestore(ptr [[TMP179]]) +// CHECK2-51-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP179]]) // CHECK2-51-NEXT: [[ARRAY_BEGIN42:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP180:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN42]], i64 2 // CHECK2-51-NEXT: br 
label [[ARRAYDESTROY_BODY:%.*]] @@ -5719,7 +5719,7 @@ // CHECK3: arrayctor.cont: // CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr @a, align 4 // CHECK3-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK3-NEXT: [[TMP3:%.*]] = mul nuw i64 10, [[TMP1]] // CHECK3-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP3]], align 16 @@ -5964,7 +5964,7 @@ // CHECK3-NEXT: [[TMP149:%.*]] = load i32, ptr @a, align 4 // CHECK3-NEXT: store i32 [[TMP149]], ptr [[RETVAL]], align 4 // CHECK3-NEXT: [[TMP150:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP150]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP150]]) // CHECK3-NEXT: [[ARRAY_BEGIN51:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S]], i32 0, i32 0 // CHECK3-NEXT: [[TMP151:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN51]], i64 2 // CHECK3-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] @@ -6735,7 +6735,7 @@ // CHECK4: arrayctor.cont: // CHECK4-NEXT: [[TMP0:%.*]] = load i32, ptr @a, align 4 // CHECK4-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK4-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK4-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK4-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK4-NEXT: [[TMP3:%.*]] = mul nuw i64 10, [[TMP1]] // CHECK4-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP3]], align 16 @@ -6980,7 +6980,7 @@ // CHECK4-NEXT: [[TMP149:%.*]] = load i32, ptr @a, align 4 // CHECK4-NEXT: store i32 [[TMP149]], ptr [[RETVAL]], align 4 // CHECK4-NEXT: [[TMP150:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK4-NEXT: call void @llvm.stackrestore(ptr [[TMP150]]) +// CHECK4-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP150]]) // CHECK4-NEXT: [[ARRAY_BEGIN51:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S]], 
i32 0, i32 0 // CHECK4-NEXT: [[TMP151:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN51]], i64 2 // CHECK4-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] @@ -7754,7 +7754,7 @@ // CHECK3-51: arrayctor.cont: // CHECK3-51-NEXT: [[TMP0:%.*]] = load i32, ptr @a, align 4 // CHECK3-51-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK3-51-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK3-51-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-51-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK3-51-NEXT: [[TMP3:%.*]] = mul nuw i64 10, [[TMP1]] // CHECK3-51-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP3]], align 16 @@ -8046,7 +8046,7 @@ // CHECK3-51-NEXT: [[TMP177:%.*]] = load i32, ptr @a, align 4 // CHECK3-51-NEXT: store i32 [[TMP177]], ptr [[RETVAL]], align 4 // CHECK3-51-NEXT: [[TMP178:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK3-51-NEXT: call void @llvm.stackrestore(ptr [[TMP178]]) +// CHECK3-51-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP178]]) // CHECK3-51-NEXT: [[ARRAY_BEGIN63:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP179:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN63]], i64 2 // CHECK3-51-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] @@ -9269,7 +9269,7 @@ // CHECK4-51: arrayctor.cont: // CHECK4-51-NEXT: [[TMP0:%.*]] = load i32, ptr @a, align 4 // CHECK4-51-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK4-51-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK4-51-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK4-51-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK4-51-NEXT: [[TMP3:%.*]] = mul nuw i64 10, [[TMP1]] // CHECK4-51-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP3]], align 16 @@ -9514,7 +9514,7 @@ // CHECK4-51-NEXT: [[TMP149:%.*]] = load i32, ptr @a, align 4 // CHECK4-51-NEXT: store i32 [[TMP149]], ptr [[RETVAL]], align 4 // CHECK4-51-NEXT: [[TMP150:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// 
CHECK4-51-NEXT: call void @llvm.stackrestore(ptr [[TMP150]]) +// CHECK4-51-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP150]]) // CHECK4-51-NEXT: [[ARRAY_BEGIN51:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S]], i32 0, i32 0 // CHECK4-51-NEXT: [[TMP151:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN51]], i64 2 // CHECK4-51-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] Index: clang/test/OpenMP/task_in_reduction_codegen.cpp =================================================================== --- clang/test/OpenMP/task_in_reduction_codegen.cpp +++ clang/test/OpenMP/task_in_reduction_codegen.cpp @@ -85,7 +85,7 @@ // CHECK1: arrayctor.cont: // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP2]], align 16 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 @@ -185,7 +185,7 @@ // CHECK1-NEXT: [[TMP48:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP44]]) // CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK1-NEXT: [[TMP49:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP49]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP49]]) // CHECK1-NEXT: [[ARRAY_BEGIN7:%.*]] = getelementptr inbounds [5 x %struct.S], ptr [[C]], i32 0, i32 0 // CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN7]], i64 5 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] Index: clang/test/OpenMP/taskloop_in_reduction_codegen.cpp =================================================================== --- clang/test/OpenMP/taskloop_in_reduction_codegen.cpp +++ clang/test/OpenMP/taskloop_in_reduction_codegen.cpp @@ -71,7 +71,7 @@ // CHECK1: 
arrayctor.cont: // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP2]], align 16 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 @@ -163,7 +163,7 @@ // CHECK1-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP0]]) // CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP43]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP43]]) // CHECK1-NEXT: [[ARRAY_BEGIN7:%.*]] = getelementptr inbounds [5 x %struct.S], ptr [[C]], i32 0, i32 0 // CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN7]], i64 5 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] Index: clang/test/OpenMP/taskloop_simd_in_reduction_codegen.cpp =================================================================== --- clang/test/OpenMP/taskloop_simd_in_reduction_codegen.cpp +++ clang/test/OpenMP/taskloop_simd_in_reduction_codegen.cpp @@ -71,7 +71,7 @@ // CHECK1: arrayctor.cont: // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP2]], align 16 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 @@ -163,7 +163,7 @@ // CHECK1-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP0]]) // CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr 
[[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP43]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP43]]) // CHECK1-NEXT: [[ARRAY_BEGIN7:%.*]] = getelementptr inbounds [5 x %struct.S], ptr [[C]], i32 0, i32 0 // CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN7]], i64 5 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] @@ -695,7 +695,7 @@ // CHECK3: arrayctor.cont: // CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP1]], align 16 // CHECK3-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -736,7 +736,7 @@ // CHECK3-NEXT: store i32 5, ptr [[I]], align 4 // CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP11]]) +// CHECK3-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP11]]) // CHECK3-NEXT: [[ARRAY_BEGIN6:%.*]] = getelementptr inbounds [5 x %struct.S], ptr [[C]], i32 0, i32 0 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN6]], i64 5 // CHECK3-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] Index: clang/test/OpenMP/teams_distribute_codegen.cpp =================================================================== --- clang/test/OpenMP/teams_distribute_codegen.cpp +++ clang/test/OpenMP/teams_distribute_codegen.cpp @@ -987,7 +987,7 @@ // CHECK9-NEXT: store i32 100, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // 
CHECK9-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -1065,7 +1065,7 @@ // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 0 // CHECK9-NEXT: [[TMP38:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 // CHECK9-NEXT: [[TMP39:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP39]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP39]]) // CHECK9-NEXT: ret i32 [[TMP38]] // // @@ -1203,7 +1203,7 @@ // CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 // CHECK11-NEXT: store i32 100, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -1282,7 +1282,7 @@ // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 0 // CHECK11-NEXT: [[TMP38:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 // CHECK11-NEXT: [[TMP39:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP39]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP39]]) // CHECK11-NEXT: ret i32 [[TMP38]] // // @@ -1729,7 +1729,7 @@ // CHECK25-NEXT: store i32 100, ptr [[N]], align 4 // CHECK25-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK25-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK25-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK25-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK25-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK25-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], 
align 4 // CHECK25-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -1808,7 +1808,7 @@ // CHECK25-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP38]]) // CHECK25-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK25-NEXT: [[TMP39:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK25-NEXT: call void @llvm.stackrestore(ptr [[TMP39]]) +// CHECK25-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP39]]) // CHECK25-NEXT: [[TMP40:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK25-NEXT: ret i32 [[TMP40]] // @@ -2121,7 +2121,7 @@ // CHECK27-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK27-NEXT: store i32 100, ptr [[N]], align 4 // CHECK27-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK27-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK27-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK27-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK27-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK27-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -2201,7 +2201,7 @@ // CHECK27-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP38]]) // CHECK27-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK27-NEXT: [[TMP39:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK27-NEXT: call void @llvm.stackrestore(ptr [[TMP39]]) +// CHECK27-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP39]]) // CHECK27-NEXT: [[TMP40:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK27-NEXT: ret i32 [[TMP40]] // Index: clang/test/OpenMP/teams_distribute_collapse_codegen.cpp =================================================================== --- clang/test/OpenMP/teams_distribute_collapse_codegen.cpp +++ clang/test/OpenMP/teams_distribute_collapse_codegen.cpp @@ -463,7 +463,7 @@ // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4 // CHECK9-NEXT: [[TMP3:%.*]] = zext i32 
[[TMP2]] to i64 -// CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], [[TMP3]] // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 @@ -567,7 +567,7 @@ // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP51]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[TMP52:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP52]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP52]]) // CHECK9-NEXT: [[TMP53:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK9-NEXT: ret i32 [[TMP53]] // @@ -924,7 +924,7 @@ // CHECK11-NEXT: store i32 2, ptr [[M]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 @@ -1029,7 +1029,7 @@ // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP50]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[TMP51:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP51]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP51]]) // CHECK11-NEXT: [[TMP52:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK11-NEXT: ret i32 [[TMP52]] // Index: clang/test/OpenMP/teams_distribute_dist_schedule_codegen.cpp =================================================================== --- 
clang/test/OpenMP/teams_distribute_dist_schedule_codegen.cpp +++ clang/test/OpenMP/teams_distribute_dist_schedule_codegen.cpp @@ -1008,7 +1008,7 @@ // CHECK9-NEXT: store i32 100, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -1229,7 +1229,7 @@ // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP108]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[TMP109:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP109]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP109]]) // CHECK9-NEXT: [[TMP110:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK9-NEXT: ret i32 [[TMP110]] // @@ -2034,7 +2034,7 @@ // CHECK11-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK11-NEXT: store i32 100, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -2258,7 +2258,7 @@ // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP110]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[TMP111:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP111]]) +// CHECK11-NEXT: call void 
@llvm.stackrestore.p0(ptr [[TMP111]]) // CHECK11-NEXT: [[TMP112:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK11-NEXT: ret i32 [[TMP112]] // Index: clang/test/OpenMP/teams_distribute_parallel_for_codegen.cpp =================================================================== --- clang/test/OpenMP/teams_distribute_parallel_for_codegen.cpp +++ clang/test/OpenMP/teams_distribute_parallel_for_codegen.cpp @@ -1397,7 +1397,7 @@ // CHECK9-NEXT: store i32 100, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -1475,7 +1475,7 @@ // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 0 // CHECK9-NEXT: [[TMP38:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 // CHECK9-NEXT: [[TMP39:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP39]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP39]]) // CHECK9-NEXT: ret i32 [[TMP38]] // // @@ -1714,7 +1714,7 @@ // CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 // CHECK11-NEXT: store i32 100, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -1793,7 +1793,7 @@ // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 0 // CHECK11-NEXT: 
[[TMP38:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 // CHECK11-NEXT: [[TMP39:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP39]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP39]]) // CHECK11-NEXT: ret i32 [[TMP38]] // // @@ -2479,7 +2479,7 @@ // CHECK25-NEXT: store i32 100, ptr [[N]], align 4 // CHECK25-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK25-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK25-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK25-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK25-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK25-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK25-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -2558,7 +2558,7 @@ // CHECK25-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP38]]) // CHECK25-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK25-NEXT: [[TMP39:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK25-NEXT: call void @llvm.stackrestore(ptr [[TMP39]]) +// CHECK25-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP39]]) // CHECK25-NEXT: [[TMP40:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK25-NEXT: ret i32 [[TMP40]] // @@ -3043,7 +3043,7 @@ // CHECK27-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK27-NEXT: store i32 100, ptr [[N]], align 4 // CHECK27-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK27-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK27-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK27-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK27-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK27-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -3123,7 +3123,7 @@ // CHECK27-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP38]]) // CHECK27-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // 
CHECK27-NEXT: [[TMP39:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK27-NEXT: call void @llvm.stackrestore(ptr [[TMP39]]) +// CHECK27-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP39]]) // CHECK27-NEXT: [[TMP40:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK27-NEXT: ret i32 [[TMP40]] // Index: clang/test/OpenMP/teams_distribute_parallel_for_collapse_codegen.cpp =================================================================== --- clang/test/OpenMP/teams_distribute_parallel_for_collapse_codegen.cpp +++ clang/test/OpenMP/teams_distribute_parallel_for_collapse_codegen.cpp @@ -614,7 +614,7 @@ // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4 // CHECK9-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 -// CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], [[TMP3]] // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 @@ -718,7 +718,7 @@ // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP51]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[TMP52:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP52]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP52]]) // CHECK9-NEXT: [[TMP53:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK9-NEXT: ret i32 [[TMP53]] // @@ -1270,7 +1270,7 @@ // CHECK11-NEXT: store i32 2, ptr [[M]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = 
mul nuw i32 [[TMP0]], [[TMP1]] // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 @@ -1375,7 +1375,7 @@ // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP50]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[TMP51:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP51]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP51]]) // CHECK11-NEXT: [[TMP52:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK11-NEXT: ret i32 [[TMP52]] // Index: clang/test/OpenMP/teams_distribute_parallel_for_dist_schedule_codegen.cpp =================================================================== --- clang/test/OpenMP/teams_distribute_parallel_for_dist_schedule_codegen.cpp +++ clang/test/OpenMP/teams_distribute_parallel_for_dist_schedule_codegen.cpp @@ -1454,7 +1454,7 @@ // CHECK9-NEXT: store i32 100, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -1685,7 +1685,7 @@ // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP113]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[TMP114:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP114]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP114]]) // CHECK9-NEXT: [[TMP115:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK9-NEXT: ret i32 [[TMP115]] // @@ -3062,7 +3062,7 @@ // CHECK11-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK11-NEXT: store i32 
100, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -3296,7 +3296,7 @@ // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP115]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[TMP116:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP116]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP116]]) // CHECK11-NEXT: [[TMP117:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK11-NEXT: ret i32 [[TMP117]] // Index: clang/test/OpenMP/teams_distribute_parallel_for_reduction_task_codegen.cpp =================================================================== --- clang/test/OpenMP/teams_distribute_parallel_for_reduction_task_codegen.cpp +++ clang/test/OpenMP/teams_distribute_parallel_for_reduction_task_codegen.cpp @@ -116,7 +116,7 @@ // CHECK1-NEXT: [[TMP10:%.*]] = sdiv exact i64 [[TMP9]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP11:%.*]] = add nuw i64 [[TMP10]], 1 // CHECK1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP13:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP13:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP13]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP11]], align 16 // CHECK1-NEXT: store i64 [[TMP11]], ptr [[__VLA_EXPR0]], align 8 @@ -313,7 +313,7 @@ // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: [[TMP90:%.*]] = load ptr, ptr [[SAVED_STACK]], 
align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP90]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP90]]) // CHECK1-NEXT: ret void // // @@ -460,7 +460,7 @@ // CHECK1-NEXT: [[TMP12:%.*]] = sdiv exact i64 [[TMP11]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP13:%.*]] = add nuw i64 [[TMP12]], 1 // CHECK1-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP15:%.*]] = call ptr @llvm.stacksave() +// CHECK1-NEXT: [[TMP15:%.*]] = call ptr @llvm.stacksave.p0() // CHECK1-NEXT: store ptr [[TMP15]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP13]], align 16 // CHECK1-NEXT: store i64 [[TMP13]], ptr [[__VLA_EXPR0]], align 8 @@ -677,7 +677,7 @@ // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: [[TMP105:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP105]]) +// CHECK1-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP105]]) // CHECK1-NEXT: ret void // // Index: clang/test/OpenMP/teams_distribute_parallel_for_schedule_codegen.cpp =================================================================== --- clang/test/OpenMP/teams_distribute_parallel_for_schedule_codegen.cpp +++ clang/test/OpenMP/teams_distribute_parallel_for_schedule_codegen.cpp @@ -4353,7 +4353,7 @@ // CHECK13-NEXT: store i32 100, ptr [[N]], align 4 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK13-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -4735,7 +4735,7 @@ // CHECK13-NEXT: [[CALL:%.*]] = call noundef 
signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP188]]) // CHECK13-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK13-NEXT: [[TMP189:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP189]]) +// CHECK13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP189]]) // CHECK13-NEXT: [[TMP190:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK13-NEXT: ret i32 [[TMP190]] // @@ -6980,7 +6980,7 @@ // CHECK15-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK15-NEXT: store i32 100, ptr [[N]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK15-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK15-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -7367,7 +7367,7 @@ // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP192]]) // CHECK15-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK15-NEXT: [[TMP193:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP193]]) +// CHECK15-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP193]]) // CHECK15-NEXT: [[TMP194:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK15-NEXT: ret i32 [[TMP194]] // @@ -9561,7 +9561,7 @@ // CHECK17-NEXT: store i32 100, ptr [[N]], align 4 // CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK17-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK17-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK17-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK17-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK17-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK17-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -9943,7 +9943,7 @@ // CHECK17-NEXT: 
[[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP188]]) // CHECK17-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK17-NEXT: [[TMP189:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(ptr [[TMP189]]) +// CHECK17-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP189]]) // CHECK17-NEXT: [[TMP190:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK17-NEXT: ret i32 [[TMP190]] // @@ -12188,7 +12188,7 @@ // CHECK19-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK19-NEXT: store i32 100, ptr [[N]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK19-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK19-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK19-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -12575,7 +12575,7 @@ // CHECK19-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP192]]) // CHECK19-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK19-NEXT: [[TMP193:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(ptr [[TMP193]]) +// CHECK19-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP193]]) // CHECK19-NEXT: [[TMP194:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK19-NEXT: ret i32 [[TMP194]] // Index: clang/test/OpenMP/teams_distribute_parallel_for_simd_codegen.cpp =================================================================== --- clang/test/OpenMP/teams_distribute_parallel_for_simd_codegen.cpp +++ clang/test/OpenMP/teams_distribute_parallel_for_simd_codegen.cpp @@ -1811,7 +1811,7 @@ // CHECK9-NEXT: store i32 100, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// 
CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -1898,7 +1898,7 @@ // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 0 // CHECK9-NEXT: [[TMP43:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 // CHECK9-NEXT: [[TMP44:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP44]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP44]]) // CHECK9-NEXT: ret i32 [[TMP43]] // // @@ -2186,7 +2186,7 @@ // CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 // CHECK11-NEXT: store i32 100, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -2274,7 +2274,7 @@ // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 0 // CHECK11-NEXT: [[TMP43:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 // CHECK11-NEXT: [[TMP44:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP44]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP44]]) // CHECK11-NEXT: ret i32 [[TMP43]] // // @@ -2558,7 +2558,7 @@ // CHECK13-NEXT: store i32 100, ptr [[N]], align 4 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK13-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 
8 // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -2617,7 +2617,7 @@ // CHECK13-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 0 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX12]], align 4 // CHECK13-NEXT: [[TMP16:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP16]]) +// CHECK13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP16]]) // CHECK13-NEXT: ret i32 [[TMP15]] // // @@ -2640,7 +2640,7 @@ // CHECK15-NEXT: [[I5:%.*]] = alloca i32, align 4 // CHECK15-NEXT: store i32 100, ptr [[N]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK15-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK15-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -2698,7 +2698,7 @@ // CHECK15-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 0 // CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX12]], align 4 // CHECK15-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) +// CHECK15-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP15]]) // CHECK15-NEXT: ret i32 [[TMP14]] // // @@ -3398,7 +3398,7 @@ // CHECK25-NEXT: store i32 100, ptr [[N]], align 4 // CHECK25-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK25-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK25-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK25-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK25-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK25-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK25-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -3486,7 
+3486,7 @@ // CHECK25-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP43]]) // CHECK25-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK25-NEXT: [[TMP44:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK25-NEXT: call void @llvm.stackrestore(ptr [[TMP44]]) +// CHECK25-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP44]]) // CHECK25-NEXT: [[TMP45:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK25-NEXT: ret i32 [[TMP45]] // @@ -4034,7 +4034,7 @@ // CHECK27-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK27-NEXT: store i32 100, ptr [[N]], align 4 // CHECK27-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK27-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK27-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK27-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK27-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK27-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -4123,7 +4123,7 @@ // CHECK27-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP43]]) // CHECK27-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK27-NEXT: [[TMP44:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK27-NEXT: call void @llvm.stackrestore(ptr [[TMP44]]) +// CHECK27-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP44]]) // CHECK27-NEXT: [[TMP45:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK27-NEXT: ret i32 [[TMP45]] // @@ -4662,7 +4662,7 @@ // CHECK29-NEXT: store i32 100, ptr [[N]], align 4 // CHECK29-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK29-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK29-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK29-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK29-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK29-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK29-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ 
-4722,7 +4722,7 @@ // CHECK29-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP15]]) // CHECK29-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK29-NEXT: [[TMP16:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK29-NEXT: call void @llvm.stackrestore(ptr [[TMP16]]) +// CHECK29-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP16]]) // CHECK29-NEXT: [[TMP17:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK29-NEXT: ret i32 [[TMP17]] // @@ -4799,7 +4799,7 @@ // CHECK31-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK31-NEXT: store i32 100, ptr [[N]], align 4 // CHECK31-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK31-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK31-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK31-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK31-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK31-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -4858,7 +4858,7 @@ // CHECK31-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP14]]) // CHECK31-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK31-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK31-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) +// CHECK31-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP15]]) // CHECK31-NEXT: [[TMP16:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK31-NEXT: ret i32 [[TMP16]] // Index: clang/test/OpenMP/teams_distribute_parallel_for_simd_collapse_codegen.cpp =================================================================== --- clang/test/OpenMP/teams_distribute_parallel_for_simd_collapse_codegen.cpp +++ clang/test/OpenMP/teams_distribute_parallel_for_simd_collapse_codegen.cpp @@ -791,7 +791,7 @@ // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4 // CHECK9-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 -// 
CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], [[TMP3]] // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 @@ -895,7 +895,7 @@ // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP51]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[TMP52:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP52]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP52]]) // CHECK9-NEXT: [[TMP53:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK9-NEXT: ret i32 [[TMP53]] // @@ -1499,7 +1499,7 @@ // CHECK11-NEXT: store i32 2, ptr [[M]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 @@ -1604,7 +1604,7 @@ // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP50]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[TMP51:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP51]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP51]]) // CHECK11-NEXT: [[TMP52:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK11-NEXT: ret i32 [[TMP52]] // @@ -2206,7 +2206,7 @@ // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4 // CHECK13-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 -// 
CHECK13-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// CHECK13-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // CHECK13-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], [[TMP3]] // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 @@ -2314,7 +2314,7 @@ // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP28]]) // CHECK13-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK13-NEXT: [[TMP29:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP29]]) +// CHECK13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP29]]) // CHECK13-NEXT: [[TMP30:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK13-NEXT: ret i32 [[TMP30]] // @@ -2407,7 +2407,7 @@ // CHECK15-NEXT: store i32 2, ptr [[M]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4 -// CHECK15-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK15-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK15-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK15-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 @@ -2513,7 +2513,7 @@ // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP26]]) // CHECK15-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK15-NEXT: [[TMP27:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP27]]) +// CHECK15-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP27]]) // CHECK15-NEXT: [[TMP28:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK15-NEXT: ret i32 [[TMP28]] // Index: clang/test/OpenMP/teams_distribute_parallel_for_simd_dist_schedule_codegen.cpp =================================================================== --- 
clang/test/OpenMP/teams_distribute_parallel_for_simd_dist_schedule_codegen.cpp +++ clang/test/OpenMP/teams_distribute_parallel_for_simd_dist_schedule_codegen.cpp @@ -1788,7 +1788,7 @@ // CHECK9-NEXT: store i32 100, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -2019,7 +2019,7 @@ // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP113]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[TMP114:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP114]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP114]]) // CHECK9-NEXT: [[TMP115:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK9-NEXT: ret i32 [[TMP115]] // @@ -3510,7 +3510,7 @@ // CHECK11-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK11-NEXT: store i32 100, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -3744,7 +3744,7 @@ // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP115]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[TMP116:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP116]]) +// 
CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP116]]) // CHECK11-NEXT: [[TMP117:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK11-NEXT: ret i32 [[TMP117]] // @@ -5203,7 +5203,7 @@ // CHECK13-NEXT: store i32 100, ptr [[N]], align 4 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK13-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -5361,7 +5361,7 @@ // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP37]]) // CHECK13-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK13-NEXT: [[TMP38:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP38]]) +// CHECK13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP38]]) // CHECK13-NEXT: [[TMP39:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK13-NEXT: ret i32 [[TMP39]] // @@ -5522,7 +5522,7 @@ // CHECK15-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK15-NEXT: store i32 100, ptr [[N]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK15-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK15-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -5677,7 +5677,7 @@ // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP36]]) // CHECK15-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK15-NEXT: [[TMP37:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK15-NEXT: call void @llvm.stackrestore(ptr 
[[TMP37]]) +// CHECK15-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP37]]) // CHECK15-NEXT: [[TMP38:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK15-NEXT: ret i32 [[TMP38]] // Index: clang/test/OpenMP/teams_distribute_parallel_for_simd_schedule_codegen.cpp =================================================================== --- clang/test/OpenMP/teams_distribute_parallel_for_simd_schedule_codegen.cpp +++ clang/test/OpenMP/teams_distribute_parallel_for_simd_schedule_codegen.cpp @@ -5028,7 +5028,7 @@ // CHECK13-NEXT: store i32 100, ptr [[N]], align 4 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK13-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -5410,7 +5410,7 @@ // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP188]]) // CHECK13-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK13-NEXT: [[TMP189:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP189]]) +// CHECK13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP189]]) // CHECK13-NEXT: [[TMP190:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK13-NEXT: ret i32 [[TMP190]] // @@ -7846,7 +7846,7 @@ // CHECK14-NEXT: store i32 100, ptr [[N]], align 4 // CHECK14-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK14-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK14-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK14-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK14-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK14-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK14-NEXT: store i64 [[TMP1]], ptr 
[[__VLA_EXPR0]], align 8 @@ -8228,7 +8228,7 @@ // CHECK14-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP188]]) // CHECK14-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK14-NEXT: [[TMP189:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK14-NEXT: call void @llvm.stackrestore(ptr [[TMP189]]) +// CHECK14-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP189]]) // CHECK14-NEXT: [[TMP190:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK14-NEXT: ret i32 [[TMP190]] // @@ -10663,7 +10663,7 @@ // CHECK17-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK17-NEXT: store i32 100, ptr [[N]], align 4 // CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK17-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK17-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK17-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK17-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -11050,7 +11050,7 @@ // CHECK17-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP192]]) // CHECK17-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK17-NEXT: [[TMP193:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK17-NEXT: call void @llvm.stackrestore(ptr [[TMP193]]) +// CHECK17-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP193]]) // CHECK17-NEXT: [[TMP194:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK17-NEXT: ret i32 [[TMP194]] // @@ -13433,7 +13433,7 @@ // CHECK19-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK19-NEXT: store i32 100, ptr [[N]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK19-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK19-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK19-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // 
CHECK19-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -13820,7 +13820,7 @@ // CHECK19-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP192]]) // CHECK19-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK19-NEXT: [[TMP193:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(ptr [[TMP193]]) +// CHECK19-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP193]]) // CHECK19-NEXT: [[TMP194:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK19-NEXT: ret i32 [[TMP194]] // @@ -16199,7 +16199,7 @@ // CHECK21-NEXT: store i32 100, ptr [[N]], align 4 // CHECK21-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK21-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK21-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK21-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK21-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK21-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK21-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -16457,7 +16457,7 @@ // CHECK21-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP60]]) // CHECK21-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK21-NEXT: [[TMP61:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK21-NEXT: call void @llvm.stackrestore(ptr [[TMP61]]) +// CHECK21-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP61]]) // CHECK21-NEXT: [[TMP62:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK21-NEXT: ret i32 [[TMP62]] // @@ -16706,7 +16706,7 @@ // CHECK23-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK23-NEXT: store i32 100, ptr [[N]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK23-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK23-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK23-NEXT: [[VLA:%.*]] = alloca i32, i32 
[[TMP0]], align 4 // CHECK23-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -16959,7 +16959,7 @@ // CHECK23-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP59]]) // CHECK23-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK23-NEXT: [[TMP60:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK23-NEXT: call void @llvm.stackrestore(ptr [[TMP60]]) +// CHECK23-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP60]]) // CHECK23-NEXT: [[TMP61:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK23-NEXT: ret i32 [[TMP61]] // Index: clang/test/OpenMP/teams_distribute_simd_codegen.cpp =================================================================== --- clang/test/OpenMP/teams_distribute_simd_codegen.cpp +++ clang/test/OpenMP/teams_distribute_simd_codegen.cpp @@ -1370,7 +1370,7 @@ // CHECK9-NEXT: store i32 100, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -1448,7 +1448,7 @@ // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 0 // CHECK9-NEXT: [[TMP38:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 // CHECK9-NEXT: [[TMP39:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP39]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP39]]) // CHECK9-NEXT: ret i32 [[TMP38]] // // @@ -1598,7 +1598,7 @@ // CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 // CHECK11-NEXT: store i32 100, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = call ptr 
@llvm.stacksave() +// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -1677,7 +1677,7 @@ // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 0 // CHECK11-NEXT: [[TMP38:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 // CHECK11-NEXT: [[TMP39:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP39]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP39]]) // CHECK11-NEXT: ret i32 [[TMP38]] // // @@ -1826,7 +1826,7 @@ // CHECK13-NEXT: store i32 100, ptr [[N]], align 4 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK13-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -1882,7 +1882,7 @@ // CHECK13-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 0 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX10]], align 4 // CHECK13-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) +// CHECK13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP15]]) // CHECK13-NEXT: ret i32 [[TMP14]] // // @@ -1902,7 +1902,7 @@ // CHECK15-NEXT: [[I3:%.*]] = alloca i32, align 4 // CHECK15-NEXT: store i32 100, ptr [[N]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK15-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // 
CHECK15-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK15-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -1957,7 +1957,7 @@ // CHECK15-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 0 // CHECK15-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX10]], align 4 // CHECK15-NEXT: [[TMP14:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP14]]) +// CHECK15-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP14]]) // CHECK15-NEXT: ret i32 [[TMP13]] // // @@ -2497,7 +2497,7 @@ // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK21-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 1 -// CHECK21-NEXT: [[TMP10:%.*]] = load float, ptr [[B]], align 4, !nontemporal [[META5:![0-9]+]], !llvm.access.group [[ACC_GRP4]] +// CHECK21-NEXT: [[TMP10:%.*]] = load float, ptr [[B]], align 4, !nontemporal !5, !llvm.access.group [[ACC_GRP4]] // CHECK21-NEXT: [[CONV:%.*]] = fptosi float [[TMP10]] to i32 // CHECK21-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[TMP0]], i32 0, i32 0 // CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] @@ -2732,7 +2732,7 @@ // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK23-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 1 -// CHECK23-NEXT: [[TMP10:%.*]] = load float, ptr [[B]], align 4, !nontemporal [[META6:![0-9]+]], !llvm.access.group [[ACC_GRP5]] +// CHECK23-NEXT: [[TMP10:%.*]] = load float, ptr [[B]], align 4, !nontemporal !6, !llvm.access.group [[ACC_GRP5]] // CHECK23-NEXT: [[CONV:%.*]] = fptosi float [[TMP10]] to i32 // CHECK23-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[TMP0]], i32 0, i32 0 // CHECK23-NEXT: [[TMP11:%.*]] = 
load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP5]] @@ -2958,7 +2958,7 @@ // CHECK29-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK29-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] // CHECK29-NEXT: [[B3:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 1 -// CHECK29-NEXT: [[TMP6:%.*]] = load float, ptr [[B3]], align 4, !nontemporal [[META3:![0-9]+]], !llvm.access.group [[ACC_GRP2]] +// CHECK29-NEXT: [[TMP6:%.*]] = load float, ptr [[B3]], align 4, !nontemporal !3, !llvm.access.group [[ACC_GRP2]] // CHECK29-NEXT: [[CONV:%.*]] = fptosi float [[TMP6]] to i32 // CHECK29-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK29-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] @@ -3058,7 +3058,7 @@ // CHECK31-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK31-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK31-NEXT: [[B3:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 1 -// CHECK31-NEXT: [[TMP6:%.*]] = load float, ptr [[B3]], align 4, !nontemporal [[META4:![0-9]+]], !llvm.access.group [[ACC_GRP3]] +// CHECK31-NEXT: [[TMP6:%.*]] = load float, ptr [[B3]], align 4, !nontemporal !4, !llvm.access.group [[ACC_GRP3]] // CHECK31-NEXT: [[CONV:%.*]] = fptosi float [[TMP6]] to i32 // CHECK31-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK31-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] @@ -3135,7 +3135,7 @@ // CHECK33-NEXT: store i32 100, ptr [[N]], align 4 // CHECK33-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK33-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK33-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK33-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK33-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK33-NEXT: [[VLA:%.*]] = alloca i32, i64 
[[TMP1]], align 4 // CHECK33-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -3214,7 +3214,7 @@ // CHECK33-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP38]]) // CHECK33-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK33-NEXT: [[TMP39:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK33-NEXT: call void @llvm.stackrestore(ptr [[TMP39]]) +// CHECK33-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP39]]) // CHECK33-NEXT: [[TMP40:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK33-NEXT: ret i32 [[TMP40]] // @@ -3544,7 +3544,7 @@ // CHECK35-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK35-NEXT: store i32 100, ptr [[N]], align 4 // CHECK35-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK35-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK35-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK35-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK35-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK35-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -3624,7 +3624,7 @@ // CHECK35-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP38]]) // CHECK35-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK35-NEXT: [[TMP39:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK35-NEXT: call void @llvm.stackrestore(ptr [[TMP39]]) +// CHECK35-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP39]]) // CHECK35-NEXT: [[TMP40:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK35-NEXT: ret i32 [[TMP40]] // @@ -3954,7 +3954,7 @@ // CHECK37-NEXT: store i32 100, ptr [[N]], align 4 // CHECK37-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK37-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK37-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK37-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK37-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK37-NEXT: [[VLA:%.*]] = alloca 
i32, i64 [[TMP1]], align 4 // CHECK37-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -4042,7 +4042,7 @@ // CHECK37-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP43]]) // CHECK37-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK37-NEXT: [[TMP44:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK37-NEXT: call void @llvm.stackrestore(ptr [[TMP44]]) +// CHECK37-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP44]]) // CHECK37-NEXT: [[TMP45:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK37-NEXT: ret i32 [[TMP45]] // @@ -4420,7 +4420,7 @@ // CHECK39-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK39-NEXT: store i32 100, ptr [[N]], align 4 // CHECK39-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK39-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK39-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK39-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK39-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK39-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -4509,7 +4509,7 @@ // CHECK39-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP43]]) // CHECK39-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK39-NEXT: [[TMP44:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK39-NEXT: call void @llvm.stackrestore(ptr [[TMP44]]) +// CHECK39-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP44]]) // CHECK39-NEXT: [[TMP45:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK39-NEXT: ret i32 [[TMP45]] // @@ -4883,7 +4883,7 @@ // CHECK41-NEXT: store i32 100, ptr [[N]], align 4 // CHECK41-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK41-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK41-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK41-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK41-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK41-NEXT: [[VLA:%.*]] = 
alloca i32, i64 [[TMP1]], align 4 // CHECK41-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -4940,7 +4940,7 @@ // CHECK41-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP14]]) // CHECK41-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK41-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK41-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) +// CHECK41-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP15]]) // CHECK41-NEXT: [[TMP16:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK41-NEXT: ret i32 [[TMP16]] // @@ -5014,7 +5014,7 @@ // CHECK43-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK43-NEXT: store i32 100, ptr [[N]], align 4 // CHECK43-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK43-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK43-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK43-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK43-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK43-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -5070,7 +5070,7 @@ // CHECK43-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP13]]) // CHECK43-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK43-NEXT: [[TMP14:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK43-NEXT: call void @llvm.stackrestore(ptr [[TMP14]]) +// CHECK43-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP14]]) // CHECK43-NEXT: [[TMP15:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK43-NEXT: ret i32 [[TMP15]] // @@ -5145,7 +5145,7 @@ // CHECK45-NEXT: store i32 100, ptr [[N]], align 4 // CHECK45-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK45-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK45-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK45-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK45-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK45-NEXT: 
[[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK45-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -5238,7 +5238,7 @@ // CHECK45-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP21]]) // CHECK45-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK45-NEXT: [[TMP22:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK45-NEXT: call void @llvm.stackrestore(ptr [[TMP22]]) +// CHECK45-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP22]]) // CHECK45-NEXT: [[TMP23:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK45-NEXT: ret i32 [[TMP23]] // @@ -5313,7 +5313,7 @@ // CHECK47-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK47-NEXT: store i32 100, ptr [[N]], align 4 // CHECK47-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK47-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK47-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK47-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK47-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK47-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -5404,7 +5404,7 @@ // CHECK47-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP20]]) // CHECK47-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK47-NEXT: [[TMP21:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK47-NEXT: call void @llvm.stackrestore(ptr [[TMP21]]) +// CHECK47-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP21]]) // CHECK47-NEXT: [[TMP22:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK47-NEXT: ret i32 [[TMP22]] // Index: clang/test/OpenMP/teams_distribute_simd_collapse_codegen.cpp =================================================================== --- clang/test/OpenMP/teams_distribute_simd_collapse_codegen.cpp +++ clang/test/OpenMP/teams_distribute_simd_collapse_codegen.cpp @@ -620,7 +620,7 @@ // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr 
[[M]], align 4 // CHECK9-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 -// CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], [[TMP3]] // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 @@ -724,7 +724,7 @@ // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP51]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[TMP52:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP52]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP52]]) // CHECK9-NEXT: [[TMP53:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK9-NEXT: ret i32 [[TMP53]] // @@ -1107,7 +1107,7 @@ // CHECK11-NEXT: store i32 2, ptr [[M]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 @@ -1212,7 +1212,7 @@ // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP50]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[TMP51:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP51]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP51]]) // CHECK11-NEXT: [[TMP52:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK11-NEXT: ret i32 [[TMP52]] // @@ -1593,7 +1593,7 @@ // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], 
align 4 // CHECK13-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 -// CHECK13-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// CHECK13-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // CHECK13-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], [[TMP3]] // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 @@ -1701,7 +1701,7 @@ // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP28]]) // CHECK13-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK13-NEXT: [[TMP29:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP29]]) +// CHECK13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP29]]) // CHECK13-NEXT: [[TMP30:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK13-NEXT: ret i32 [[TMP30]] // @@ -1794,7 +1794,7 @@ // CHECK15-NEXT: store i32 2, ptr [[M]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4 -// CHECK15-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK15-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK15-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK15-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 @@ -1900,7 +1900,7 @@ // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP26]]) // CHECK15-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK15-NEXT: [[TMP27:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP27]]) +// CHECK15-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP27]]) // CHECK15-NEXT: [[TMP28:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK15-NEXT: ret i32 [[TMP28]] // Index: clang/test/OpenMP/teams_distribute_simd_dist_schedule_codegen.cpp 
=================================================================== --- clang/test/OpenMP/teams_distribute_simd_dist_schedule_codegen.cpp +++ clang/test/OpenMP/teams_distribute_simd_dist_schedule_codegen.cpp @@ -1297,7 +1297,7 @@ // CHECK9-NEXT: store i32 100, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -1518,7 +1518,7 @@ // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP108]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[TMP109:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP109]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP109]]) // CHECK9-NEXT: [[TMP110:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK9-NEXT: ret i32 [[TMP110]] // @@ -2380,7 +2380,7 @@ // CHECK11-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK11-NEXT: store i32 100, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -2604,7 +2604,7 @@ // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP110]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[TMP111:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call 
void @llvm.stackrestore(ptr [[TMP111]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP111]]) // CHECK11-NEXT: [[TMP112:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK11-NEXT: ret i32 [[TMP112]] // @@ -3459,7 +3459,7 @@ // CHECK13-NEXT: store i32 100, ptr [[N]], align 4 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK13-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -3616,7 +3616,7 @@ // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP37]]) // CHECK13-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK13-NEXT: [[TMP38:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP38]]) +// CHECK13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP38]]) // CHECK13-NEXT: [[TMP39:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK13-NEXT: ret i32 [[TMP39]] // @@ -3771,7 +3771,7 @@ // CHECK15-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK15-NEXT: store i32 100, ptr [[N]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK15-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK15-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -3925,7 +3925,7 @@ // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP36]]) // CHECK15-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK15-NEXT: [[TMP37:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// 
CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP37]]) +// CHECK15-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP37]]) // CHECK15-NEXT: [[TMP38:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK15-NEXT: ret i32 [[TMP38]] // Index: clang/test/OpenMP/teams_firstprivate_codegen.cpp =================================================================== --- clang/test/OpenMP/teams_firstprivate_codegen.cpp +++ clang/test/OpenMP/teams_firstprivate_codegen.cpp @@ -1901,7 +1901,7 @@ // CHECK17-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK17-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64 -// CHECK17-NEXT: [[TMP6:%.*]] = call ptr @llvm.stacksave() +// CHECK17-NEXT: [[TMP6:%.*]] = call ptr @llvm.stacksave.p0() // CHECK17-NEXT: store ptr [[TMP6]], ptr [[SAVED_STACK]], align 8 // CHECK17-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP3]], [[TMP5]] // CHECK17-NEXT: [[VLA:%.*]] = alloca double, i64 [[TMP7]], align 128 @@ -2003,7 +2003,7 @@ // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK17: omp_offload.cont: // CHECK17-NEXT: [[TMP58:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(ptr [[TMP58]]) +// CHECK17-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP58]]) // CHECK17-NEXT: ret void // // @@ -2068,7 +2068,7 @@ // CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR3]], align 8 // CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[VLA_ADDR5]], align 8 // CHECK17-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VLA2_ADDR]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = call ptr @llvm.stacksave() +// CHECK17-NEXT: [[TMP5:%.*]] = call ptr @llvm.stacksave.p0() // CHECK17-NEXT: store ptr [[TMP5]], ptr [[SAVED_STACK]], align 8 // CHECK17-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP2]], [[TMP3]] // CHECK17-NEXT: [[VLA7:%.*]] = alloca double, i64 [[TMP6]], align 128 @@ -2084,7 +2084,7 @@ // CHECK17-NEXT: [[TMP12:%.*]] = load ptr, ptr [[VLA1_ADDR]], align 8 // CHECK17-NEXT: call void 
@_ZN2St7St_funcEPS_iPg(ptr nonnull align 4 dereferenceable(8) [[ARRAYIDX]], ptr [[TMP10]], i32 signext [[TMP11]], ptr [[TMP12]]) // CHECK17-NEXT: [[TMP13:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(ptr [[TMP13]]) +// CHECK17-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP13]]) // CHECK17-NEXT: ret void // // @@ -2115,7 +2115,7 @@ // CHECK17-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK17-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64 -// CHECK17-NEXT: [[TMP6:%.*]] = call ptr @llvm.stacksave() +// CHECK17-NEXT: [[TMP6:%.*]] = call ptr @llvm.stacksave.p0() // CHECK17-NEXT: store ptr [[TMP6]], ptr [[SAVED_STACK]], align 8 // CHECK17-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP3]], [[TMP5]] // CHECK17-NEXT: [[VLA:%.*]] = alloca double, i64 [[TMP7]], align 128 @@ -2241,7 +2241,7 @@ // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK17: omp_offload.cont: // CHECK17-NEXT: [[TMP70:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(ptr [[TMP70]]) +// CHECK17-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP70]]) // CHECK17-NEXT: ret void // // @@ -2307,7 +2307,7 @@ // CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[VLA_ADDR5]], align 8 // CHECK17-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VLA2_ADDR]], align 8 // CHECK17-NEXT: [[TMP5:%.*]] = load ptr, ptr [[N_ADDR]], align 8 -// CHECK17-NEXT: [[TMP6:%.*]] = call ptr @llvm.stacksave() +// CHECK17-NEXT: [[TMP6:%.*]] = call ptr @llvm.stacksave.p0() // CHECK17-NEXT: store ptr [[TMP6]], ptr [[SAVED_STACK]], align 8 // CHECK17-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP2]], [[TMP3]] // CHECK17-NEXT: [[VLA7:%.*]] = alloca double, i64 [[TMP7]], align 128 @@ -2336,7 +2336,7 @@ // CHECK17-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds ppc_fp128, ptr [[TMP13]], i64 [[IDXPROM11]] // CHECK17-NEXT: store ppc_fp128 [[CONV9]], ptr [[ARRAYIDX12]], align 16 // CHECK17-NEXT: [[TMP15:%.*]] = 
load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) +// CHECK17-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP15]]) // CHECK17-NEXT: ret void // // @@ -2370,7 +2370,7 @@ // CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK19-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK19-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK19-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP1]], [[TMP2]] // CHECK19-NEXT: [[VLA:%.*]] = alloca double, i32 [[TMP4]], align 128 @@ -2473,7 +2473,7 @@ // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK19: omp_offload.cont: // CHECK19-NEXT: [[TMP56:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(ptr [[TMP56]]) +// CHECK19-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP56]]) // CHECK19-NEXT: ret void // // @@ -2538,7 +2538,7 @@ // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR3]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[VLA_ADDR5]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VLA2_ADDR]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = call ptr @llvm.stacksave() +// CHECK19-NEXT: [[TMP5:%.*]] = call ptr @llvm.stacksave.p0() // CHECK19-NEXT: store ptr [[TMP5]], ptr [[SAVED_STACK]], align 4 // CHECK19-NEXT: [[TMP6:%.*]] = mul nuw i32 [[TMP2]], [[TMP3]] // CHECK19-NEXT: [[VLA7:%.*]] = alloca double, i32 [[TMP6]], align 128 @@ -2554,7 +2554,7 @@ // CHECK19-NEXT: [[TMP12:%.*]] = load ptr, ptr [[VLA1_ADDR]], align 4 // CHECK19-NEXT: call void @_ZN2St7St_funcEPS_iPe(ptr nonnull align 4 dereferenceable(8) [[ARRAYIDX]], ptr [[TMP10]], i32 [[TMP11]], ptr [[TMP12]]) // CHECK19-NEXT: [[TMP13:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(ptr [[TMP13]]) +// 
CHECK19-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP13]]) // CHECK19-NEXT: ret void // // @@ -2582,7 +2582,7 @@ // CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 -// CHECK19-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() +// CHECK19-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave.p0() // CHECK19-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP1]], [[TMP2]] // CHECK19-NEXT: [[VLA:%.*]] = alloca double, i32 [[TMP4]], align 128 @@ -2709,7 +2709,7 @@ // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK19: omp_offload.cont: // CHECK19-NEXT: [[TMP68:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(ptr [[TMP68]]) +// CHECK19-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP68]]) // CHECK19-NEXT: ret void // // @@ -2775,7 +2775,7 @@ // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[VLA_ADDR5]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VLA2_ADDR]], align 4 // CHECK19-NEXT: [[TMP5:%.*]] = load ptr, ptr [[N_ADDR]], align 4 -// CHECK19-NEXT: [[TMP6:%.*]] = call ptr @llvm.stacksave() +// CHECK19-NEXT: [[TMP6:%.*]] = call ptr @llvm.stacksave.p0() // CHECK19-NEXT: store ptr [[TMP6]], ptr [[SAVED_STACK]], align 4 // CHECK19-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP2]], [[TMP3]] // CHECK19-NEXT: [[VLA7:%.*]] = alloca double, i32 [[TMP7]], align 128 @@ -2802,7 +2802,7 @@ // CHECK19-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds x86_fp80, ptr [[TMP13]], i32 [[TMP14]] // CHECK19-NEXT: store x86_fp80 [[CONV9]], ptr [[ARRAYIDX11]], align 4 // CHECK19-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) +// CHECK19-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP15]]) // CHECK19-NEXT: ret void // // Index: clang/test/OpenMP/teams_generic_loop_codegen-1.cpp 
=================================================================== --- clang/test/OpenMP/teams_generic_loop_codegen-1.cpp +++ clang/test/OpenMP/teams_generic_loop_codegen-1.cpp @@ -1366,7 +1366,7 @@ // CHECK9-NEXT: store i32 100, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -1444,7 +1444,7 @@ // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 0 // CHECK9-NEXT: [[TMP38:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 // CHECK9-NEXT: [[TMP39:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP39]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP39]]) // CHECK9-NEXT: ret i32 [[TMP38]] // // @@ -1683,7 +1683,7 @@ // CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 // CHECK11-NEXT: store i32 100, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -1762,7 +1762,7 @@ // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 0 // CHECK11-NEXT: [[TMP38:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 // CHECK11-NEXT: [[TMP39:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP39]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr 
[[TMP39]]) // CHECK11-NEXT: ret i32 [[TMP38]] // // @@ -2448,7 +2448,7 @@ // CHECK25-NEXT: store i32 100, ptr [[N]], align 4 // CHECK25-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK25-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK25-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK25-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK25-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK25-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK25-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 @@ -2527,7 +2527,7 @@ // CHECK25-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP38]]) // CHECK25-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK25-NEXT: [[TMP39:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK25-NEXT: call void @llvm.stackrestore(ptr [[TMP39]]) +// CHECK25-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP39]]) // CHECK25-NEXT: [[TMP40:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK25-NEXT: ret i32 [[TMP40]] // @@ -3012,7 +3012,7 @@ // CHECK27-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK27-NEXT: store i32 100, ptr [[N]], align 4 // CHECK27-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK27-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +// CHECK27-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() // CHECK27-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK27-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK27-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 @@ -3092,7 +3092,7 @@ // CHECK27-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP38]]) // CHECK27-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK27-NEXT: [[TMP39:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK27-NEXT: call void @llvm.stackrestore(ptr [[TMP39]]) +// CHECK27-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP39]]) // CHECK27-NEXT: [[TMP40:%.*]] = load i32, ptr 
[[RETVAL]], align 4 // CHECK27-NEXT: ret i32 [[TMP40]] // Index: clang/test/OpenMP/teams_generic_loop_collapse_codgen.cpp =================================================================== --- clang/test/OpenMP/teams_generic_loop_collapse_codgen.cpp +++ clang/test/OpenMP/teams_generic_loop_collapse_codgen.cpp @@ -614,7 +614,7 @@ // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4 // CHECK9-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 -// CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() +// CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave.p0() // CHECK9-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], [[TMP3]] // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 @@ -718,7 +718,7 @@ // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP51]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[TMP52:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP52]]) +// CHECK9-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP52]]) // CHECK9-NEXT: [[TMP53:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK9-NEXT: ret i32 [[TMP53]] // @@ -1270,7 +1270,7 @@ // CHECK11-NEXT: store i32 2, ptr [[M]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() +// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0() // CHECK11-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 @@ -1375,7 +1375,7 @@ // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP50]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // 
CHECK11-NEXT: [[TMP51:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP51]]) +// CHECK11-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP51]]) // CHECK11-NEXT: [[TMP52:%.*]] = load i32, ptr [[RETVAL]], align 4 // CHECK11-NEXT: ret i32 [[TMP52]] // Index: llvm/docs/LangRef.rst =================================================================== --- llvm/docs/LangRef.rst +++ llvm/docs/LangRef.rst @@ -2863,6 +2863,9 @@ address space 0, this property only affects the default value to be used when creating globals without additional contextual information (e.g. in LLVM passes). + +.. _alloca_addrspace: + ``A
`` Specifies the address space of objects created by '``alloca``'. Defaults to the default address space of 0. @@ -13415,7 +13418,8 @@ :: - declare ptr @llvm.stacksave() + declare ptr @llvm.stacksave.p0() + declare ptr addrspace(5) @llvm.stacksave.p5() Overview: """"""""" @@ -13434,8 +13438,10 @@ ``llvm.stackrestore`` intrinsic is executed with a value saved from ``llvm.stacksave``, it effectively restores the state of the stack to the state it was in when the ``llvm.stacksave`` intrinsic executed. In -practice, this pops any :ref:`alloca ` blocks from the stack that -were allocated after the ``llvm.stacksave`` was executed. +practice, this pops any :ref:`alloca ` blocks from the stack +that were allocated after the ``llvm.stacksave`` was executed. The +address space should typically be the +:ref:`alloca address space `. .. _int_stackrestore: @@ -13447,7 +13453,8 @@ :: - declare void @llvm.stackrestore(ptr %ptr) + declare void @llvm.stackrestore.p0(ptr %ptr) + declare void @llvm.stackrestore.p5(ptr addrspace(5) %ptr) Overview: """"""""" @@ -13455,8 +13462,9 @@ The '``llvm.stackrestore``' intrinsic is used to restore the state of the function stack to the state it was in when the corresponding :ref:`llvm.stacksave ` intrinsic executed. This is -useful for implementing language features like scoped automatic variable -sized arrays in C99. +useful for implementing language features like scoped automatic +variable sized arrays in C99. The address space should typically be +the :ref:`alloca address space `. Semantics: """""""""" Index: llvm/docs/ReleaseNotes.rst =================================================================== --- llvm/docs/ReleaseNotes.rst +++ llvm/docs/ReleaseNotes.rst @@ -50,6 +50,9 @@ Changes to the LLVM IR ---------------------- +* The `llvm.stacksave` and `llvm.stackrestore` intrinsics now use + an overloaded pointer type to support non-0 address spaces. 
+ Changes to LLVM infrastructure ------------------------------ Index: llvm/include/llvm/IR/DataLayout.h =================================================================== --- llvm/include/llvm/IR/DataLayout.h +++ llvm/include/llvm/IR/DataLayout.h @@ -275,6 +275,10 @@ unsigned getAllocaAddrSpace() const { return AllocaAddrSpace; } + PointerType *getAllocaPtrType(LLVMContext &Ctx) const { + return PointerType::get(Ctx, AllocaAddrSpace); + } + /// Returns the alignment of function pointers, which may or may not be /// related to the alignment of functions. /// \see getFunctionPtrAlignType Index: llvm/include/llvm/IR/IRBuilder.h =================================================================== --- llvm/include/llvm/IR/IRBuilder.h +++ llvm/include/llvm/IR/IRBuilder.h @@ -1024,6 +1024,19 @@ nullptr, Name); } + /// Create a call to llvm.stacksave + CallInst *CreateStackSave(const Twine &Name = "") { + const DataLayout &DL = BB->getModule()->getDataLayout(); + return CreateIntrinsic(Intrinsic::stacksave, {DL.getAllocaPtrType(Context)}, + {}, nullptr, Name); + } + + /// Create a call to llvm.stackrestore + CallInst *CreateStackRestore(Value *Ptr, const Twine &Name = "") { + return CreateIntrinsic(Intrinsic::stackrestore, {Ptr->getType()}, {Ptr}, + nullptr, Name); + } + private: /// Create a call to a masked intrinsic with given Id. CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef Ops, Index: llvm/include/llvm/IR/Intrinsics.td =================================================================== --- llvm/include/llvm/IR/Intrinsics.td +++ llvm/include/llvm/IR/Intrinsics.td @@ -849,9 +849,9 @@ // Note: we treat stacksave/stackrestore as writemem because we don't otherwise // model their dependencies on allocas. 
-def int_stacksave : DefaultAttrsIntrinsic<[llvm_ptr_ty]>, +def int_stacksave : DefaultAttrsIntrinsic<[llvm_anyptr_ty]>, ClangBuiltin<"__builtin_stack_save">; -def int_stackrestore : DefaultAttrsIntrinsic<[], [llvm_ptr_ty]>, +def int_stackrestore : DefaultAttrsIntrinsic<[], [llvm_anyptr_ty]>, ClangBuiltin<"__builtin_stack_restore">; def int_get_dynamic_area_offset : DefaultAttrsIntrinsic<[llvm_anyint_ty]>; Index: llvm/lib/CodeGen/SjLjEHPrepare.cpp =================================================================== --- llvm/lib/CodeGen/SjLjEHPrepare.cpp +++ llvm/lib/CodeGen/SjLjEHPrepare.cpp @@ -490,12 +490,15 @@ UnregisterFn = M.getOrInsertFunction( "_Unwind_SjLj_Unregister", Type::getVoidTy(M.getContext()), PointerType::getUnqual(FunctionContextTy)); - FrameAddrFn = Intrinsic::getDeclaration( - &M, Intrinsic::frameaddress, - {Type::getInt8PtrTy(M.getContext(), - M.getDataLayout().getAllocaAddrSpace())}); - StackAddrFn = Intrinsic::getDeclaration(&M, Intrinsic::stacksave); - StackRestoreFn = Intrinsic::getDeclaration(&M, Intrinsic::stackrestore); + + PointerType *AllocaPtrTy = M.getDataLayout().getAllocaPtrType(M.getContext()); + + FrameAddrFn = + Intrinsic::getDeclaration(&M, Intrinsic::frameaddress, {AllocaPtrTy}); + StackAddrFn = + Intrinsic::getDeclaration(&M, Intrinsic::stacksave, {AllocaPtrTy}); + StackRestoreFn = + Intrinsic::getDeclaration(&M, Intrinsic::stackrestore, {AllocaPtrTy}); BuiltinSetupDispatchFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_setup_dispatch); LSDAAddrFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_lsda); Index: llvm/lib/Target/X86/X86WinEHState.cpp =================================================================== --- llvm/lib/Target/X86/X86WinEHState.cpp +++ llvm/lib/Target/X86/X86WinEHState.cpp @@ -283,8 +283,7 @@ RegNodeTy = getCXXEHRegistrationType(); RegNode = Builder.CreateAlloca(RegNodeTy); // SavedESP = llvm.stacksave() - Value *SP = Builder.CreateCall( - Intrinsic::getDeclaration(TheModule, 
Intrinsic::stacksave), {}); + Value *SP = Builder.CreateStackSave(); Builder.CreateStore(SP, Builder.CreateStructGEP(RegNodeTy, RegNode, 0)); // TryLevel = -1 StateFieldIndex = 2; @@ -313,8 +312,7 @@ EHGuardNode = Builder.CreateAlloca(Int32Ty); // SavedESP = llvm.stacksave() - Value *SP = Builder.CreateCall( - Intrinsic::getDeclaration(TheModule, Intrinsic::stacksave), {}); + Value *SP = Builder.CreateStackSave(); Builder.CreateStore(SP, Builder.CreateStructGEP(RegNodeTy, RegNode, 0)); // TryLevel = -2 / -1 StateFieldIndex = 4; Index: llvm/lib/Transforms/Coroutines/CoroFrame.cpp =================================================================== --- llvm/lib/Transforms/Coroutines/CoroFrame.cpp +++ llvm/lib/Transforms/Coroutines/CoroFrame.cpp @@ -2432,15 +2432,13 @@ static void lowerLocalAllocas(ArrayRef LocalAllocas, SmallVectorImpl &DeadInsts) { for (auto *AI : LocalAllocas) { - auto M = AI->getModule(); IRBuilder<> Builder(AI); // Save the stack depth. Try to avoid doing this if the stackrestore // is going to immediately precede a return or something. Value *StackSave = nullptr; if (localAllocaNeedsStackSave(AI)) - StackSave = Builder.CreateCall( - Intrinsic::getDeclaration(M, Intrinsic::stacksave)); + StackSave = Builder.CreateStackSave(); // Allocate memory. 
auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize()); @@ -2458,9 +2456,7 @@ auto FI = cast(U); if (StackSave) { Builder.SetInsertPoint(FI); - Builder.CreateCall( - Intrinsic::getDeclaration(M, Intrinsic::stackrestore), - StackSave); + Builder.CreateStackRestore(StackSave); } } DeadInsts.push_back(cast(U)); Index: llvm/lib/Transforms/IPO/GlobalOpt.cpp =================================================================== --- llvm/lib/Transforms/IPO/GlobalOpt.cpp +++ llvm/lib/Transforms/IPO/GlobalOpt.cpp @@ -1873,12 +1873,9 @@ CB->eraseFromParent(); Builder.SetInsertPoint(PreallocatedSetup); - auto *StackSave = - Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stacksave)); - + auto *StackSave = Builder.CreateStackSave(); Builder.SetInsertPoint(NewCB->getNextNonDebugInstruction()); - Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackrestore), - StackSave); + Builder.CreateStackRestore(StackSave); // Replace @llvm.call.preallocated.arg() with alloca. // Cannot modify users() while iterating over it, so make a copy. Index: llvm/lib/Transforms/Utils/InlineFunction.cpp =================================================================== --- llvm/lib/Transforms/Utils/InlineFunction.cpp +++ llvm/lib/Transforms/Utils/InlineFunction.cpp @@ -2454,14 +2454,9 @@ // If the inlined code contained dynamic alloca instructions, wrap the inlined // code with llvm.stacksave/llvm.stackrestore intrinsics. if (InlinedFunctionInfo.ContainsDynamicAllocas) { - Module *M = Caller->getParent(); - // Get the two intrinsics we care about. - Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave); - Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore); - // Insert the llvm.stacksave. 
CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin()) - .CreateCall(StackSave, {}, "savedstack"); + .CreateStackSave("savedstack"); // Insert a call to llvm.stackrestore before any return instructions in the // inlined function. @@ -2472,7 +2467,7 @@ continue; if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall()) continue; - IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr); + IRBuilder<>(RI).CreateStackRestore(SavedPtr); } } Index: llvm/test/Bitcode/compatibility-3.6.ll =================================================================== --- llvm/test/Bitcode/compatibility-3.6.ll +++ llvm/test/Bitcode/compatibility-3.6.ll @@ -1128,9 +1128,9 @@ ; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0) %stack = call i8* @llvm.stacksave() - ; CHECK: %stack = call ptr @llvm.stacksave() + ; CHECK: %stack = call ptr @llvm.stacksave.p0() call void @llvm.stackrestore(i8* %stack) - ; CHECK: call void @llvm.stackrestore(ptr %stack) + ; CHECK: call void @llvm.stackrestore.p0(ptr %stack) call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0) ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0) Index: llvm/test/Bitcode/compatibility-3.7.ll =================================================================== --- llvm/test/Bitcode/compatibility-3.7.ll +++ llvm/test/Bitcode/compatibility-3.7.ll @@ -1159,9 +1159,9 @@ ; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0) %stack = call i8* @llvm.stacksave() - ; CHECK: %stack = call ptr @llvm.stacksave() + ; CHECK: %stack = call ptr @llvm.stacksave.p0() call void @llvm.stackrestore(i8* %stack) - ; CHECK: call void @llvm.stackrestore(ptr %stack) + ; CHECK: call void @llvm.stackrestore.p0(ptr %stack) call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0) ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0) Index: llvm/test/Bitcode/compatibility-3.8.ll =================================================================== --- 
llvm/test/Bitcode/compatibility-3.8.ll +++ llvm/test/Bitcode/compatibility-3.8.ll @@ -1314,9 +1314,9 @@ ; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0) %stack = call i8* @llvm.stacksave() - ; CHECK: %stack = call ptr @llvm.stacksave() + ; CHECK: %stack = call ptr @llvm.stacksave.p0() call void @llvm.stackrestore(i8* %stack) - ; CHECK: call void @llvm.stackrestore(ptr %stack) + ; CHECK: call void @llvm.stackrestore.p0(ptr %stack) call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0) ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0) Index: llvm/test/Bitcode/compatibility-3.9.ll =================================================================== --- llvm/test/Bitcode/compatibility-3.9.ll +++ llvm/test/Bitcode/compatibility-3.9.ll @@ -1385,9 +1385,9 @@ ; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0) %stack = call i8* @llvm.stacksave() - ; CHECK: %stack = call ptr @llvm.stacksave() + ; CHECK: %stack = call ptr @llvm.stacksave.p0() call void @llvm.stackrestore(i8* %stack) - ; CHECK: call void @llvm.stackrestore(ptr %stack) + ; CHECK: call void @llvm.stackrestore.p0(ptr %stack) call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0) ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0) Index: llvm/test/Bitcode/compatibility-4.0.ll =================================================================== --- llvm/test/Bitcode/compatibility-4.0.ll +++ llvm/test/Bitcode/compatibility-4.0.ll @@ -1385,9 +1385,9 @@ ; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0) %stack = call i8* @llvm.stacksave() - ; CHECK: %stack = call ptr @llvm.stacksave() + ; CHECK: %stack = call ptr @llvm.stacksave.p0() call void @llvm.stackrestore(i8* %stack) - ; CHECK: call void @llvm.stackrestore(ptr %stack) + ; CHECK: call void @llvm.stackrestore.p0(ptr %stack) call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0) ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0) Index: 
llvm/test/Bitcode/compatibility-5.0.ll =================================================================== --- llvm/test/Bitcode/compatibility-5.0.ll +++ llvm/test/Bitcode/compatibility-5.0.ll @@ -1397,9 +1397,9 @@ ; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0) %stack = call i8* @llvm.stacksave() - ; CHECK: %stack = call ptr @llvm.stacksave() + ; CHECK: %stack = call ptr @llvm.stacksave.p0() call void @llvm.stackrestore(i8* %stack) - ; CHECK: call void @llvm.stackrestore(ptr %stack) + ; CHECK: call void @llvm.stackrestore.p0(ptr %stack) call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0) ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0) Index: llvm/test/Bitcode/compatibility-6.0.ll =================================================================== --- llvm/test/Bitcode/compatibility-6.0.ll +++ llvm/test/Bitcode/compatibility-6.0.ll @@ -1407,9 +1407,9 @@ ; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0) %stack = call i8* @llvm.stacksave() - ; CHECK: %stack = call ptr @llvm.stacksave() + ; CHECK: %stack = call ptr @llvm.stacksave.p0() call void @llvm.stackrestore(i8* %stack) - ; CHECK: call void @llvm.stackrestore(ptr %stack) + ; CHECK: call void @llvm.stackrestore.p0(ptr %stack) call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0) ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0) Index: llvm/test/Bitcode/compatibility.ll =================================================================== --- llvm/test/Bitcode/compatibility.ll +++ llvm/test/Bitcode/compatibility.ll @@ -1709,9 +1709,9 @@ ; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0) %stack = call ptr @llvm.stacksave() - ; CHECK: %stack = call ptr @llvm.stacksave() + ; CHECK: %stack = call ptr @llvm.stacksave.p0() call void @llvm.stackrestore(ptr %stack) - ; CHECK: call void @llvm.stackrestore(ptr %stack) + ; CHECK: call void @llvm.stackrestore.p0(ptr %stack) call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 
0) ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0) Index: llvm/test/CodeGen/AMDGPU/promote-alloca-unhandled-intrinsic.ll =================================================================== --- llvm/test/CodeGen/AMDGPU/promote-alloca-unhandled-intrinsic.ll +++ llvm/test/CodeGen/AMDGPU/promote-alloca-unhandled-intrinsic.ll @@ -1,20 +1,20 @@ ; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -passes=amdgpu-promote-alloca < %s | FileCheck %s -; This is just an arbitrary intrinisic that shouldn't ever need to be +; This is just an arbitrary intrinsic that shouldn't be ; handled to ensure it doesn't crash. -declare void @llvm.stackrestore(ptr) #2 +declare void @llvm.stackrestore.p5(ptr addrspace(5)) #2 ; CHECK-LABEL: @try_promote_unhandled_intrinsic( ; CHECK: alloca -; CHECK: call void @llvm.stackrestore(ptr %tmp) +; CHECK: call void @llvm.stackrestore.p5(ptr addrspace(5) %tmp) define amdgpu_kernel void @try_promote_unhandled_intrinsic(ptr addrspace(1) %arg) #2 { bb: - %tmp = alloca i32, align 4 + %tmp = alloca i32, addrspace(5) %tmp2 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 1 %tmp3 = load i32, ptr addrspace(1) %tmp2 - store i32 %tmp3, ptr %tmp - call void @llvm.stackrestore(ptr %tmp) + store i32 %tmp3, ptr addrspace(5) %tmp + call void @llvm.stackrestore.p5(ptr addrspace(5) %tmp) ret void } Index: llvm/test/Other/force-opaque-ptrs.ll =================================================================== --- llvm/test/Other/force-opaque-ptrs.ll +++ llvm/test/Other/force-opaque-ptrs.ll @@ -72,7 +72,7 @@ define void @remangle_intrinsic() { ; CHECK-LABEL: define {{[^@]+}}@remangle_intrinsic() { ; CHECK-NEXT: [[A:%.*]] = alloca ptr, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() ; CHECK-NEXT: call void @llvm.stackprotector(ptr null, ptr [[A]]) ; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i64> @llvm.masked.expandload.v2i64(ptr null, <2 x i1> zeroinitializer, <2 x i64>
zeroinitializer) ; CHECK-NEXT: ret void Index: llvm/test/Transforms/ArgumentPromotion/X86/thiscall.ll =================================================================== --- llvm/test/Transforms/ArgumentPromotion/X86/thiscall.ll +++ llvm/test/Transforms/ArgumentPromotion/X86/thiscall.ll @@ -46,18 +46,18 @@ define void @exportedfun(ptr %a) { ; ARGPROMOTION-LABEL: define {{[^@]+}}@exportedfun ; ARGPROMOTION-SAME: (ptr [[A:%.*]]) { -; ARGPROMOTION-NEXT: [[INALLOCA_SAVE:%.*]] = tail call ptr @llvm.stacksave() +; ARGPROMOTION-NEXT: [[INALLOCA_SAVE:%.*]] = tail call ptr @llvm.stacksave.p0() ; ARGPROMOTION-NEXT: [[ARGMEM:%.*]] = alloca inalloca <{ [[STRUCT_A:%.*]] }>, align 4 ; ARGPROMOTION-NEXT: call x86_thiscallcc void @internalfun(ptr [[A]], ptr inalloca(<{ [[STRUCT_A]] }>) [[ARGMEM]]) -; ARGPROMOTION-NEXT: call void @llvm.stackrestore(ptr [[INALLOCA_SAVE]]) +; ARGPROMOTION-NEXT: call void @llvm.stackrestore.p0(ptr [[INALLOCA_SAVE]]) ; ARGPROMOTION-NEXT: ret void ; ; GLOBALOPT_ARGPROMOTION-LABEL: define {{[^@]+}}@exportedfun ; GLOBALOPT_ARGPROMOTION-SAME: (ptr [[A:%.*]]) local_unnamed_addr { -; GLOBALOPT_ARGPROMOTION-NEXT: [[INALLOCA_SAVE:%.*]] = tail call ptr @llvm.stacksave() +; GLOBALOPT_ARGPROMOTION-NEXT: [[INALLOCA_SAVE:%.*]] = tail call ptr @llvm.stacksave.p0() ; GLOBALOPT_ARGPROMOTION-NEXT: [[ARGMEM:%.*]] = alloca inalloca <{ [[STRUCT_A:%.*]] }>, align 4 ; GLOBALOPT_ARGPROMOTION-NEXT: call fastcc void @internalfun(ptr [[ARGMEM]]) -; GLOBALOPT_ARGPROMOTION-NEXT: call void @llvm.stackrestore(ptr [[INALLOCA_SAVE]]) +; GLOBALOPT_ARGPROMOTION-NEXT: call void @llvm.stackrestore.p0(ptr [[INALLOCA_SAVE]]) ; GLOBALOPT_ARGPROMOTION-NEXT: ret void ; %inalloca.save = tail call ptr @llvm.stacksave() Index: llvm/test/Transforms/Attributor/ArgumentPromotion/X86/thiscall.ll =================================================================== --- llvm/test/Transforms/Attributor/ArgumentPromotion/X86/thiscall.ll +++ llvm/test/Transforms/Attributor/ArgumentPromotion/X86/thiscall.ll 
@@ -32,18 +32,18 @@ define void @exportedfun(ptr %a) { ; TUNIT-LABEL: define {{[^@]+}}@exportedfun ; TUNIT-SAME: (ptr nocapture nofree readnone [[A:%.*]]) { -; TUNIT-NEXT: [[INALLOCA_SAVE:%.*]] = tail call ptr @llvm.stacksave() #[[ATTR1:[0-9]+]] +; TUNIT-NEXT: [[INALLOCA_SAVE:%.*]] = tail call ptr @llvm.stacksave.p0() #[[ATTR1:[0-9]+]] ; TUNIT-NEXT: [[ARGMEM:%.*]] = alloca inalloca <{ [[STRUCT_A:%.*]] }>, align 4 ; TUNIT-NEXT: call x86_thiscallcc void @internalfun(ptr noalias nocapture nofree readnone undef, ptr noundef nonnull inalloca(<{ [[STRUCT_A]] }>) align 4 dereferenceable(1) [[ARGMEM]]) -; TUNIT-NEXT: call void @llvm.stackrestore(ptr nofree [[INALLOCA_SAVE]]) +; TUNIT-NEXT: call void @llvm.stackrestore.p0(ptr nofree [[INALLOCA_SAVE]]) ; TUNIT-NEXT: ret void ; ; CGSCC-LABEL: define {{[^@]+}}@exportedfun ; CGSCC-SAME: (ptr nocapture nofree readnone [[A:%.*]]) { -; CGSCC-NEXT: [[INALLOCA_SAVE:%.*]] = tail call ptr @llvm.stacksave() #[[ATTR1:[0-9]+]] +; CGSCC-NEXT: [[INALLOCA_SAVE:%.*]] = tail call ptr @llvm.stacksave.p0() #[[ATTR1:[0-9]+]] ; CGSCC-NEXT: [[ARGMEM:%.*]] = alloca inalloca <{ [[STRUCT_A:%.*]] }>, align 4 ; CGSCC-NEXT: call x86_thiscallcc void @internalfun(ptr noalias nocapture nofree readnone [[A]], ptr noundef nonnull inalloca(<{ [[STRUCT_A]] }>) align 4 dereferenceable(1) [[ARGMEM]]) -; CGSCC-NEXT: call void @llvm.stackrestore(ptr nofree [[INALLOCA_SAVE]]) +; CGSCC-NEXT: call void @llvm.stackrestore.p0(ptr nofree [[INALLOCA_SAVE]]) ; CGSCC-NEXT: ret void ; %inalloca.save = tail call ptr @llvm.stacksave() Index: llvm/test/Transforms/GVNHoist/hoist-pr28606.ll =================================================================== --- llvm/test/Transforms/GVNHoist/hoist-pr28606.ll +++ llvm/test/Transforms/GVNHoist/hoist-pr28606.ll @@ -15,10 +15,10 @@ ; CHECK-LABEL: @test( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[X:%.*]] = alloca i8, align 1 -; CHECK-NEXT: [[INALLOCA_SAVE:%.*]] = call ptr @llvm.stacksave() +; CHECK-NEXT: [[INALLOCA_SAVE:%.*]] = call ptr 
@llvm.stacksave.p0() ; CHECK-NEXT: [[ARGMEM:%.*]] = alloca inalloca <{ [[STRUCT_S:%.*]] }>, align 4 -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARGMEM]], i32 0, i32 1 -; CHECK-NEXT: store ptr [[X]], ptr [[TMP1]], align 4 +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARGMEM]], i32 0, i32 1 +; CHECK-NEXT: store ptr [[X]], ptr [[TMP0]], align 4 ; CHECK-NEXT: br i1 [[B:%.*]], label [[TRUE:%.*]], label [[FALSE:%.*]] ; CHECK: true: ; CHECK-NEXT: [[P:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARGMEM]], i32 0, i32 1 @@ -28,7 +28,7 @@ ; CHECK-NEXT: br label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: call void @f(ptr inalloca(<{ [[STRUCT_S]] }>) [[ARGMEM]]) -; CHECK-NEXT: call void @llvm.stackrestore(ptr [[INALLOCA_SAVE]]) +; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[INALLOCA_SAVE]]) ; CHECK-NEXT: ret void ; entry: Index: llvm/test/Transforms/GlobalOpt/preallocated.ll =================================================================== --- llvm/test/Transforms/GlobalOpt/preallocated.ll +++ llvm/test/Transforms/GlobalOpt/preallocated.ll @@ -54,13 +54,13 @@ define i32 @call_preallocated_multiple_args() { ; CHECK-LABEL: define {{[^@]+}}@call_preallocated_multiple_args() local_unnamed_addr { -; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() +; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0() ; CHECK-NEXT: [[PAARG:%.*]] = alloca i32, align 4 ; CHECK-NEXT: call void @foo(ptr [[PAARG]]) ; CHECK-NEXT: call void @foo(ptr [[PAARG]]) ; CHECK-NEXT: call void @foo(ptr [[PAARG]]) ; CHECK-NEXT: [[R:%.*]] = call fastcc i32 @preallocated(ptr [[PAARG]]) -; CHECK-NEXT: call void @llvm.stackrestore(ptr [[TMP1]]) +; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP1]]) ; CHECK-NEXT: ret i32 [[R]] ; %c = call token @llvm.call.preallocated.setup(i32 1) Index: llvm/test/Transforms/IndVarSimplify/sink-alloca.ll =================================================================== --- 
llvm/test/Transforms/IndVarSimplify/sink-alloca.ll +++ llvm/test/Transforms/IndVarSimplify/sink-alloca.ll @@ -33,7 +33,7 @@ define void @h(i64 %n) nounwind uwtable ssp { ; CHECK: entry: ; CHECK-NEXT: %vla = alloca ptr -; CHECK-NEXT: %savedstack = call ptr @llvm.stacksave() +; CHECK-NEXT: %savedstack = call ptr @llvm.stacksave.p0() entry: %vla = alloca ptr, i64 %n, align 16 %savedstack = call ptr @llvm.stacksave() nounwind Index: llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll =================================================================== --- llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll +++ llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll @@ -53,11 +53,11 @@ ; CHECK-NEXT: [[COND:%.*]] = icmp eq i1 [[B]], true ; CHECK-NEXT: br i1 [[COND]], label [[EXIT:%.*]], label [[SPLIT:%.*]] ; CHECK: split: -; CHECK-NEXT: [[SAVEDSTACK:%.*]] = call ptr @llvm.stacksave() +; CHECK-NEXT: [[SAVEDSTACK:%.*]] = call ptr @llvm.stacksave.p0() ; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 60000, ptr [[VLA_I]]) ; CHECK-NEXT: call void @extern_call(ptr nonnull [[VLA_I]]) #[[ATTR3]] ; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 60000, ptr [[VLA_I]]) -; CHECK-NEXT: call void @llvm.stackrestore(ptr [[SAVEDSTACK]]) +; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[SAVEDSTACK]]) ; CHECK-NEXT: br label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void @@ -187,7 +187,7 @@ ; CHECK-LABEL: define {{[^@]+}}@test_stack_allocate_always ; CHECK-SAME: (i32 [[SIZE:%.*]]) { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[SAVEDSTACK:%.*]] = call ptr @llvm.stacksave() +; CHECK-NEXT: [[SAVEDSTACK:%.*]] = call ptr @llvm.stacksave.p0() ; CHECK-NEXT: [[CMP_I:%.*]] = icmp ult i32 [[SIZE]], 100 ; CHECK-NEXT: [[CONV_I:%.*]] = zext i32 [[SIZE]] to i64 ; CHECK-NEXT: br i1 [[CMP_I]], label [[IF_THEN_I:%.*]], label [[IF_END_I:%.*]] @@ -199,7 +199,7 @@ ; CHECK-NEXT: br label [[STACK_ALLOCATE_EXIT]] ; CHECK: stack_allocate.exit: ; CHECK-NEXT: [[RETVAL_0_I:%.*]] = phi ptr 
[ [[TMP0]], [[IF_THEN_I]] ], [ [[CALL_I]], [[IF_END_I]] ] -; CHECK-NEXT: call void @llvm.stackrestore(ptr [[SAVEDSTACK]]) +; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[SAVEDSTACK]]) ; CHECK-NEXT: ret ptr [[RETVAL_0_I]] ; entry: Index: llvm/test/Transforms/Inline/inalloca-not-static.ll =================================================================== --- llvm/test/Transforms/Inline/inalloca-not-static.ll +++ llvm/test/Transforms/Inline/inalloca-not-static.ll @@ -52,9 +52,9 @@ } ; CHECK: define void @f() -; CHECK: %[[STACKSAVE:.*]] = call ptr @llvm.stacksave() +; CHECK: %[[STACKSAVE:.*]] = call ptr @llvm.stacksave.p0() ; CHECK: %[[ARGMEM:.*]] = alloca inalloca <{ %struct.Foo }>, align 4 ; CHECK: %[[CALL:.*]] = call x86_thiscallcc ptr @"\01??0Foo@@QAE@XZ"(ptr %[[ARGMEM]]) ; CHECK: call x86_thiscallcc void @"\01??1Foo@@QAE@XZ"(ptr %[[ARGMEM]]) -; CHECK: call void @llvm.stackrestore(ptr %[[STACKSAVE]]) +; CHECK: call void @llvm.stackrestore.p0(ptr %[[STACKSAVE]]) ; CHECK: ret void Index: llvm/test/Transforms/Inline/inline-tail.ll =================================================================== --- llvm/test/Transforms/Inline/inline-tail.ll +++ llvm/test/Transforms/Inline/inline-tail.ll @@ -86,7 +86,7 @@ ; CHECK-LABEL: define void @test_dynalloca_a ; CHECK-SAME: (ptr byval(i32) [[P:%.*]], i32 [[N:%.*]]) { ; CHECK-NEXT: [[P1:%.*]] = alloca i32, align 4 -; CHECK-NEXT: [[SAVEDSTACK:%.*]] = call ptr @llvm.stacksave() +; CHECK-NEXT: [[SAVEDSTACK:%.*]] = call ptr @llvm.stacksave.p0() ; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[P1]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[P1]], ptr align 1 [[P]], i64 4, i1 false) ; CHECK-NEXT: [[BUF_I:%.*]] = alloca i8, i32 [[N]], align 1 Index: llvm/test/Transforms/InstCombine/sink-alloca.ll =================================================================== --- llvm/test/Transforms/InstCombine/sink-alloca.ll +++ llvm/test/Transforms/InstCombine/sink-alloca.ll @@ -40,13 +40,13 @@ ; 
CHECK-LABEL: define void @foo(i32 %x) ; CHECK: nonentry: ; CHECK: %argmem = alloca i32, i32 %x -; CHECK: %sp = call ptr @llvm.stacksave() +; CHECK: %sp = call ptr @llvm.stacksave.p0() ; CHECK: %c2 = call i1 @cond() ; CHECK: br i1 %c2, label %ret, label %sinktarget ; CHECK: sinktarget: ; CHECK: %p = call ptr @use_and_return(ptr nonnull %argmem) ; CHECK: store i32 13, ptr %p -; CHECK: call void @llvm.stackrestore(ptr %sp) +; CHECK: call void @llvm.stackrestore.p0(ptr %sp) ; CHECK: %0 = call ptr @use_and_return(ptr nonnull %p) attributes #0 = { nounwind } Index: llvm/test/Transforms/InstCombine/stacksaverestore.ll =================================================================== --- llvm/test/Transforms/InstCombine/stacksaverestore.ll +++ llvm/test/Transforms/InstCombine/stacksaverestore.ll @@ -63,11 +63,11 @@ } ; CHECK-LABEL: define void @foo( -; CHECK: %tmp = call ptr @llvm.stacksave() +; CHECK: %tmp = call ptr @llvm.stacksave.p0() ; CHECK: alloca i8 ; CHECK-NOT: stacksave ; CHECK: call void @bar( -; CHECK-NEXT: call void @llvm.stackrestore(ptr %tmp) +; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr %tmp) ; CHECK: ret void declare void @bar(i32, ptr, ptr, ptr, ptr, i32) @@ -103,11 +103,11 @@ ; CHECK-LABEL: define void @test3( ; CHECK: loop: ; CHECK: %i = phi i32 [ 0, %entry ], [ %i1, %loop ] -; CHECK: %save1 = call ptr @llvm.stacksave() +; CHECK: %save1 = call ptr @llvm.stacksave.p0() ; CHECK: %argmem = alloca inalloca i32 ; CHECK: store i32 0, ptr %argmem ; CHECK: call void @inalloca_callee(ptr {{.*}} inalloca(i32) %argmem) -; CHECK: call void @llvm.stackrestore(ptr %save1) +; CHECK: call void @llvm.stackrestore.p0(ptr %save1) ; CHECK: br i1 %done, label %loop, label %return ; CHECK: ret void Index: llvm/test/Transforms/MemCpyOpt/stackrestore.ll =================================================================== --- llvm/test/Transforms/MemCpyOpt/stackrestore.ll +++ llvm/test/Transforms/MemCpyOpt/stackrestore.ll @@ -51,13 +51,13 @@ define i32 
@test_stackrestore() { ; CHECK-LABEL: @test_stackrestore( ; CHECK-NEXT: [[TMPMEM:%.*]] = alloca [10 x i8], align 4 -; CHECK-NEXT: [[INALLOCA_SAVE:%.*]] = tail call ptr @llvm.stacksave() +; CHECK-NEXT: [[INALLOCA_SAVE:%.*]] = tail call ptr @llvm.stacksave.p0() ; CHECK-NEXT: [[ARGMEM:%.*]] = alloca inalloca [10 x i8], align 4 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr [[ARGMEM]], ptr align 1 @str, i32 9, i1 false) ; CHECK-NEXT: [[P10:%.*]] = getelementptr inbounds [10 x i8], ptr [[ARGMEM]], i32 0, i32 9 ; CHECK-NEXT: store i8 0, ptr [[P10]], align 1 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr [[TMPMEM]], ptr [[ARGMEM]], i32 10, i1 false) -; CHECK-NEXT: call void @llvm.stackrestore(ptr [[INALLOCA_SAVE]]) +; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[INALLOCA_SAVE]]) ; CHECK-NEXT: [[HEAP:%.*]] = call ptr @malloc(i32 9) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr [[HEAP]], ptr [[TMPMEM]], i32 9, i1 false) ; CHECK-NEXT: call void @useit(ptr [[HEAP]]) Index: llvm/test/Transforms/ObjCARC/post-inlining.ll =================================================================== --- llvm/test/Transforms/ObjCARC/post-inlining.ll +++ llvm/test/Transforms/ObjCARC/post-inlining.ll @@ -92,9 +92,9 @@ ; CHECK-LABEL: define ptr @testStack( ; CHECK: entry: -; CHECK-NEXT: %save = tail call ptr @llvm.stacksave() +; CHECK-NEXT: %save = tail call ptr @llvm.stacksave.p0() ; CHECK-NEXT: %obj = alloca i8, i8 %arg -; CHECK-NEXT: call void @llvm.stackrestore(ptr %save) +; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr %save) ; CHECK-NEXT: ret ptr %call.i ; CHECK-NEXT: } define ptr @testStack(ptr %call.i, i8 %arg) { Index: llvm/test/Transforms/SLPVectorizer/X86/stackrestore-dependence.ll =================================================================== --- llvm/test/Transforms/SLPVectorizer/X86/stackrestore-dependence.ll +++ llvm/test/Transforms/SLPVectorizer/X86/stackrestore-dependence.ll @@ -4,13 +4,13 @@ ; The test checks that loads should not be moved below a 
stackrestore boundary. define void @stackrestore1(ptr %out) { ; CHECK-LABEL: @stackrestore1( -; CHECK-NEXT: [[STACK:%.*]] = call ptr @llvm.stacksave() +; CHECK-NEXT: [[STACK:%.*]] = call ptr @llvm.stacksave.p0() ; CHECK-NEXT: [[LOCAL_ALLOCA:%.*]] = alloca [16 x i8], align 4 ; CHECK-NEXT: store <4 x float> , ptr [[LOCAL_ALLOCA]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[LOCAL_ALLOCA]], align 4 -; CHECK-NEXT: call void @llvm.stackrestore(ptr [[STACK]]) -; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> -; CHECK-NEXT: store <4 x float> [[SHUFFLE]], ptr [[OUT:%.*]], align 4 +; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[STACK]]) +; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> +; CHECK-NEXT: store <4 x float> [[TMP2]], ptr [[OUT:%.*]], align 4 ; CHECK-NEXT: ret void ; %stack = call ptr @llvm.stacksave() Index: llvm/test/Transforms/SLPVectorizer/X86/stacksave-dependence.ll =================================================================== --- llvm/test/Transforms/SLPVectorizer/X86/stacksave-dependence.ll +++ llvm/test/Transforms/SLPVectorizer/X86/stacksave-dependence.ll @@ -91,10 +91,10 @@ ; CHECK-NEXT: [[V1:%.*]] = alloca i8, align 1 ; CHECK-NEXT: [[ADD1:%.*]] = getelementptr i8, ptr [[V1]], i32 1 ; CHECK-NEXT: store ptr [[ADD1]], ptr [[A:%.*]], align 8 -; CHECK-NEXT: [[STACK:%.*]] = call ptr @llvm.stacksave() +; CHECK-NEXT: [[STACK:%.*]] = call ptr @llvm.stacksave.p0() ; CHECK-NEXT: [[V2:%.*]] = alloca inalloca i8, align 1 ; CHECK-NEXT: call void @use(ptr inalloca(i8) [[V2]]) #[[ATTR4:[0-9]+]] -; CHECK-NEXT: call void @llvm.stackrestore(ptr [[STACK]]) +; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[STACK]]) ; CHECK-NEXT: [[ADD2:%.*]] = getelementptr i8, ptr [[V2]], i32 1 ; CHECK-NEXT: store ptr [[ADD1]], ptr [[B:%.*]], align 8 ; CHECK-NEXT: [[B2:%.*]] = getelementptr ptr, ptr [[B]], i32 1 @@ -122,7 +122,7 @@ define void @stacksave2(ptr %a, ptr 
%b, ptr %c) { ; CHECK-LABEL: @stacksave2( ; CHECK-NEXT: [[V1:%.*]] = alloca i8, align 1 -; CHECK-NEXT: [[STACK:%.*]] = call ptr @llvm.stacksave() +; CHECK-NEXT: [[STACK:%.*]] = call ptr @llvm.stacksave.p0() ; CHECK-NEXT: [[V2:%.*]] = alloca inalloca i8, align 1 ; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x ptr> poison, ptr [[V1]], i32 0 ; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x ptr> [[TMP1]], ptr [[V2]], i32 1 @@ -130,7 +130,7 @@ ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x ptr> [[TMP3]], i32 0 ; CHECK-NEXT: store ptr [[TMP4]], ptr [[A:%.*]], align 8 ; CHECK-NEXT: call void @use(ptr inalloca(i8) [[V2]]) #[[ATTR5:[0-9]+]] -; CHECK-NEXT: call void @llvm.stackrestore(ptr [[STACK]]) +; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[STACK]]) ; CHECK-NEXT: store <2 x ptr> [[TMP3]], ptr [[B:%.*]], align 8 ; CHECK-NEXT: ret void ; @@ -154,11 +154,11 @@ define void @stacksave3(ptr %a, ptr %b, ptr %c) { ; CHECK-LABEL: @stacksave3( -; CHECK-NEXT: [[STACK:%.*]] = call ptr @llvm.stacksave() +; CHECK-NEXT: [[STACK:%.*]] = call ptr @llvm.stacksave.p0() ; CHECK-NEXT: [[V1:%.*]] = alloca i8, align 1 ; CHECK-NEXT: [[V2:%.*]] = alloca inalloca i8, align 1 ; CHECK-NEXT: call void @use(ptr inalloca(i8) [[V2]]) #[[ATTR4]] -; CHECK-NEXT: call void @llvm.stackrestore(ptr [[STACK]]) +; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[STACK]]) ; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x ptr> poison, ptr [[V1]], i32 0 ; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x ptr> [[TMP1]], ptr [[V2]], i32 1 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, <2 x ptr> [[TMP2]], <2 x i32> @@ -189,10 +189,10 @@ ; CHECK-LABEL: @stacksave4( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x ptr>, ptr [[A:%.*]], align 8 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, <2 x ptr> [[TMP1]], <2 x i32> -; CHECK-NEXT: [[STACK:%.*]] = call ptr @llvm.stacksave() +; CHECK-NEXT: [[STACK:%.*]] = call ptr @llvm.stacksave.p0() ; CHECK-NEXT: [[X:%.*]] = alloca inalloca i8, align 1 ; CHECK-NEXT: call void @use(ptr 
inalloca(i8) [[X]]) #[[ATTR4]] -; CHECK-NEXT: call void @llvm.stackrestore(ptr [[STACK]]) +; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[STACK]]) ; CHECK-NEXT: store <2 x ptr> [[TMP2]], ptr [[B:%.*]], align 8 ; CHECK-NEXT: ret void ; @@ -219,10 +219,10 @@ ; CHECK-LABEL: @stacksave5( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x ptr>, ptr [[A:%.*]], align 8 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, <2 x ptr> [[TMP1]], <2 x i32> -; CHECK-NEXT: [[STACK:%.*]] = call ptr @llvm.stacksave() +; CHECK-NEXT: [[STACK:%.*]] = call ptr @llvm.stacksave.p0() ; CHECK-NEXT: [[X:%.*]] = alloca inalloca i8, align 1 ; CHECK-NEXT: call void @use(ptr inalloca(i8) [[X]]) #[[ATTR4]] -; CHECK-NEXT: call void @llvm.stackrestore(ptr [[STACK]]) +; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[STACK]]) ; CHECK-NEXT: store <2 x ptr> [[TMP2]], ptr [[B:%.*]], align 8 ; CHECK-NEXT: ret void ; @@ -250,10 +250,10 @@ ; bug. define void @stackrestore1(ptr %a, ptr %b, ptr %c) { ; CHECK-LABEL: @stackrestore1( -; CHECK-NEXT: [[STACK:%.*]] = call ptr @llvm.stacksave() +; CHECK-NEXT: [[STACK:%.*]] = call ptr @llvm.stacksave.p0() ; CHECK-NEXT: [[V1:%.*]] = alloca i8, align 1 ; CHECK-NEXT: store i8 0, ptr [[V1]], align 1 -; CHECK-NEXT: call void @llvm.stackrestore(ptr [[STACK]]) +; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[STACK]]) ; CHECK-NEXT: [[V2:%.*]] = alloca i8, align 1 ; CHECK-NEXT: store i8 0, ptr [[V2]], align 1 ; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x ptr> poison, ptr [[V1]], i32 0 @@ -298,10 +298,10 @@ ; CHECK-NEXT: [[VAR15:%.*]] = call ptr @wibble(ptr [[VAR2]]) ; CHECK-NEXT: [[VAR16:%.*]] = call ptr @wibble(ptr [[VAR3]]) ; CHECK-NEXT: [[VAR17:%.*]] = call ptr @wibble(ptr [[VAR4]]) -; CHECK-NEXT: [[VAR23:%.*]] = call ptr @llvm.stacksave() +; CHECK-NEXT: [[VAR23:%.*]] = call ptr @llvm.stacksave.p0() ; CHECK-NEXT: [[VAR24:%.*]] = alloca inalloca i32, align 4 ; CHECK-NEXT: call void @quux(ptr inalloca(i32) [[VAR24]]) -; CHECK-NEXT: call void @llvm.stackrestore(ptr [[VAR23]]) 
+; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[VAR23]]) ; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x ptr> poison, ptr [[VAR4]], i32 0 ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x ptr> [[TMP1]], <4 x ptr> poison, <4 x i32> zeroinitializer ; CHECK-NEXT: store <4 x ptr> [[TMP2]], ptr [[VAR12]], align 8 Index: llvm/test/Transforms/SimplifyCFG/hoist-common-skip.ll =================================================================== --- llvm/test/Transforms/SimplifyCFG/hoist-common-skip.ll +++ llvm/test/Transforms/SimplifyCFG/hoist-common-skip.ll @@ -390,26 +390,26 @@ ;; Don't hoist stacksaves across inalloca allocas define void @f10(i1 %cond) { ; CHECK-LABEL: @f10( -; CHECK-NEXT: [[SS:%.*]] = call ptr @llvm.stacksave() +; CHECK-NEXT: [[SS:%.*]] = call ptr @llvm.stacksave.p0() ; CHECK-NEXT: br i1 [[COND:%.*]], label [[BB1:%.*]], label [[BB2:%.*]] ; CHECK: bb1: ; CHECK-NEXT: [[I1:%.*]] = alloca inalloca i32, align 4 -; CHECK-NEXT: [[SS2:%.*]] = call ptr @llvm.stacksave() +; CHECK-NEXT: [[SS2:%.*]] = call ptr @llvm.stacksave.p0() ; CHECK-NEXT: [[I2:%.*]] = alloca inalloca i64, align 8 ; CHECK-NEXT: call void @inalloca_i64(ptr inalloca(i64) [[I2]]) -; CHECK-NEXT: call void @llvm.stackrestore(ptr [[SS2]]) +; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[SS2]]) ; CHECK-NEXT: call void @inalloca_i32(ptr inalloca(i32) [[I1]]) ; CHECK-NEXT: br label [[END:%.*]] ; CHECK: bb2: ; CHECK-NEXT: [[I3:%.*]] = alloca inalloca i64, align 8 -; CHECK-NEXT: [[SS3:%.*]] = call ptr @llvm.stacksave() +; CHECK-NEXT: [[SS3:%.*]] = call ptr @llvm.stacksave.p0() ; CHECK-NEXT: [[I4:%.*]] = alloca inalloca i64, align 8 ; CHECK-NEXT: [[TMP1:%.*]] = call ptr @inalloca_i64(ptr inalloca(i64) [[I4]]) -; CHECK-NEXT: call void @llvm.stackrestore(ptr [[SS3]]) +; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[SS3]]) ; CHECK-NEXT: [[TMP2:%.*]] = call ptr @inalloca_i64(ptr inalloca(i64) [[I3]]) ; CHECK-NEXT: br label [[END]] ; CHECK: end: -; CHECK-NEXT: call void @llvm.stackrestore(ptr [[SS]]) 
+; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[SS]]) ; CHECK-NEXT: ret void ; %ss = call ptr @llvm.stacksave() Index: mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td =================================================================== --- mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td +++ mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td @@ -618,12 +618,12 @@ // Stack save/restore intrinsics. // -def LLVM_StackSaveOp : LLVM_OneResultIntrOp<"stacksave"> { - let assemblyFormat = "attr-dict `:` type($res)"; +def LLVM_StackSaveOp : LLVM_OneResultIntrOp<"stacksave", [0]> { + let assemblyFormat = "attr-dict `:` qualified(type($res))"; } -def LLVM_StackRestoreOp : LLVM_ZeroResultIntrOp<"stackrestore"> { - let arguments = (ins LLVM_i8Ptr:$ptr); +def LLVM_StackRestoreOp : LLVM_ZeroResultIntrOp<"stackrestore", [0]> { + let arguments = (ins LLVM_AnyPointer:$ptr); let assemblyFormat = "$ptr attr-dict `:` qualified(type($ptr))"; }