Index: lib/CodeGen/CGLoopInfo.h
===================================================================
--- lib/CodeGen/CGLoopInfo.h
+++ lib/CodeGen/CGLoopInfo.h
@@ -84,6 +84,21 @@
   /// Get the set of attributes active for this loop.
   const LoopAttributes &getAttributes() const { return Attrs; }
 
+  /// Update the LoopID properties using information collected during codegen
+  /// of the loop's body.
+  void updateLoopMetadata() const;
+
+  /// Return this loop's access group, or nullptr if it does not have one.
+  llvm::MDNode *getAccessGroup() const { return AccGroup; }
+
+  /// Return the list of all access groups within this loop.
+  llvm::ArrayRef<llvm::MDNode *> getNestedAccGroups() const {
+    return NestedAccGroups;
+  }
+
+  /// Add the access groups of nested loops.
+  void addAccGroups(llvm::ArrayRef<llvm::MDNode *> NestedGroups);
+
 private:
   /// Loop ID metadata.
   llvm::MDNode *LoopID;
@@ -91,6 +106,11 @@
   llvm::BasicBlock *Header;
   /// The attributes for this loop.
   LoopAttributes Attrs;
+  /// The access group for memory accesses within this loop.
+  llvm::MDNode *AccGroup = nullptr;
+  /// List of all access groups within this loop, including the access groups
+  /// of nested loops.
+  llvm::SmallVector<llvm::MDNode *, 4> NestedAccGroups;
 };
 
 /// A stack of loop information corresponding to loop nesting levels.
Index: lib/CodeGen/CGLoopInfo.cpp
===================================================================
--- lib/CodeGen/CGLoopInfo.cpp
+++ lib/CodeGen/CGLoopInfo.cpp
@@ -21,7 +21,7 @@
 
 static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs,
                               const llvm::DebugLoc &StartLoc,
-                              const llvm::DebugLoc &EndLoc) {
+                              const llvm::DebugLoc &EndLoc, MDNode *&AccGroup) {
 
   if (!Attrs.IsParallel && Attrs.VectorizeWidth == 0 &&
       Attrs.InterleaveCount == 0 && Attrs.UnrollCount == 0 &&
@@ -122,6 +122,12 @@
     Args.push_back(MDNode::get(Ctx, Vals));
   }
 
+  if (Attrs.IsParallel) {
+    AccGroup = MDNode::getDistinct(Ctx, {});
+    Args.push_back(MDNode::get(
+        Ctx, {MDString::get(Ctx, "llvm.loop.parallel_accesses"), AccGroup}));
+  }
+
   // Set the first operand to itself.
   MDNode *LoopID = MDNode::get(Ctx, Args);
   LoopID->replaceOperandWith(0, LoopID);
@@ -150,7 +156,10 @@
 LoopInfo::LoopInfo(BasicBlock *Header, const LoopAttributes &Attrs,
                    const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc)
     : LoopID(nullptr), Header(Header), Attrs(Attrs) {
-  LoopID = createMetadata(Header->getContext(), Attrs, StartLoc, EndLoc);
+  LoopID =
+      createMetadata(Header->getContext(), Attrs, StartLoc, EndLoc, AccGroup);
+  if (AccGroup)
+    NestedAccGroups.push_back(AccGroup);
 }
 
 void LoopInfoStack::push(BasicBlock *Header, const llvm::DebugLoc &StartLoc,
@@ -322,12 +331,63 @@
   push(Header, StartLoc, EndLoc);
 }
 
+void LoopInfo::updateLoopMetadata() const {
+  // We may have collected additional access groups from nested loops. Update
+  // llvm.loop.parallel_accesses to include these accesses. At the moment a
+  // loop has an access group iff it is parallel, so the last property of the
+  // LoopID is already a "llvm.loop.parallel_accesses" node.
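+  //
+  // For illustration only (the metadata numbers are placeholders, not taken
+  // from a particular test): after this update, a parallel loop nest is
+  // expected to end up with metadata of roughly this shape:
+  //
+  //   store i32 %x, i32* %p, !llvm.access.group !4  ; access in the outer body
+  //   store i32 %y, i32* %q, !llvm.access.group !5  ; access in the inner loop
+  //   ...
+  //   !2 = distinct !{!2, ..., !3}                   ; outer LoopID
+  //   !3 = !{!"llvm.loop.parallel_accesses", !4, !5}
+  //   !4 = distinct !{}                              ; outer loop's access group
+  //   !5 = distinct !{}                              ; inner loop's access group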
+  if (AccGroup && NestedAccGroups.size() >= 2) {
+    LLVMContext &Ctx = AccGroup->getContext();
+    MDNode *LoopID = getLoopID();
+    unsigned LastOp = LoopID->getNumOperands() - 1;
+    assert(
+        cast<MDString>(
+            cast<MDNode>(LoopID->getOperand(LastOp).get())->getOperand(0).get())
+            ->getString() == "llvm.loop.parallel_accesses");
+
+    SmallVector<Metadata *, 4> Args;
+    Args.reserve(1 + NestedAccGroups.size());
+    Args.push_back(MDString::get(Ctx, "llvm.loop.parallel_accesses"));
+    for (MDNode *InnerGroup : NestedAccGroups)
+      Args.push_back(InnerGroup);
+
+    MDNode *ParallelAccesses = MDNode::get(Ctx, Args);
+    LoopID->replaceOperandWith(LastOp, ParallelAccesses);
+  }
+}
+
+void LoopInfo::addAccGroups(ArrayRef<MDNode *> NestedGroups) {
+  NestedAccGroups.insert(NestedAccGroups.end(), NestedGroups.begin(),
+                         NestedGroups.end());
+}
+
 void LoopInfoStack::pop() {
   assert(!Active.empty() && "No active loops to pop");
+
+  const LoopInfo &Front = getInfo();
+  Front.updateLoopMetadata();
+
+  // Inherit access groups.
+  if (Active.size() >= 2) {
+    LoopInfo &NewFront = reverse(Active).begin()[1];
+    NewFront.addAccGroups(Front.getNestedAccGroups());
+  }
+
   Active.pop_back();
 }
 
 void LoopInfoStack::InsertHelper(Instruction *I) const {
+  if (I->mayReadOrWriteMemory()) {
+    // Add this memory access instruction to the innermost loop that has an
+    // access group.
+    for (const LoopInfo &AL : reverse(Active)) {
+      if (MDNode *Group = AL.getAccessGroup()) {
+        I->setMetadata("llvm.access.group", Group);
+        break;
+      }
+    }
+  }
+
   if (!hasInfo())
     return;
@@ -343,18 +403,4 @@
       }
     return;
   }
-
-  if (I->mayReadOrWriteMemory()) {
-    SmallVector<Metadata *, 4> ParallelLoopIDs;
-    for (const LoopInfo &AL : Active)
-      if (AL.getAttributes().IsParallel)
-        ParallelLoopIDs.push_back(AL.getLoopID());
-
-    MDNode *ParallelMD = nullptr;
-    if (ParallelLoopIDs.size() == 1)
-      ParallelMD = cast<MDNode>(ParallelLoopIDs[0]);
-    else if (ParallelLoopIDs.size() >= 2)
-      ParallelMD = MDNode::get(I->getContext(), ParallelLoopIDs);
-    I->setMetadata("llvm.mem.parallel_loop_access", ParallelMD);
-  }
 }
Index: test/CodeGenCXX/pragma-loop-safety-nested.cpp
===================================================================
--- test/CodeGenCXX/pragma-loop-safety-nested.cpp
+++ test/CodeGenCXX/pragma-loop-safety-nested.cpp
@@ -1,7 +1,7 @@
 // RUN: %clang_cc1 -triple x86_64-apple-darwin -std=c++11 -emit-llvm -o - %s | FileCheck %s
 
-// Verify that the inner access is tagged with a parallel_loop_access
-// for the inner and outer loop using a list.
+// Verify that the outer loop has the llvm.access.group property for the
+// accesses outside and inside the inner loop.
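+//
+// In outline (placeholder names, not literal FileCheck variables), the
+// expected structure is:
+//   outer LoopID -> !{!"llvm.loop.parallel_accesses", <outer group>, <inner group>}
+//   inner LoopID -> !{!"llvm.loop.parallel_accesses", <inner group>}
+// because the inner loop's accesses are parallel with respect to both loops.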
 void vectorize_nested_test(int *List, int Length) {
 #pragma clang loop vectorize(assume_safety) interleave(disable) unroll(disable)
   for (int i = 0; i < Length; ++i) {
@@ -11,11 +11,17 @@
   }
 }
 
+
+// CHECK: load i32, i32* %Length.addr, align 4, !llvm.access.group ![[ACCESS_GROUP_2:[0-9]+]]
+
 // CHECK: %[[MUL:.+]] = mul
-// CHECK: store i32 %[[MUL]], i32* %{{.+}}, !llvm.mem.parallel_loop_access ![[PARALLEL_LIST:[0-9]+]]
+// CHECK: store i32 %[[MUL]], i32* %{{.+}}, !llvm.access.group ![[ACCESS_GROUP_3:[0-9]+]]
 // CHECK: br label %{{.+}}, !llvm.loop ![[INNER_LOOPID:[0-9]+]]
 // CHECK: br label %{{.+}}, !llvm.loop ![[OUTER_LOOPID:[0-9]+]]
 
-// CHECK: ![[OUTER_LOOPID]] = distinct !{![[OUTER_LOOPID]],
-// CHECK: ![[PARALLEL_LIST]] = !{![[OUTER_LOOPID]], ![[INNER_LOOPID]]}
-// CHECK: ![[INNER_LOOPID]] = distinct !{![[INNER_LOOPID]],
+// CHECK: ![[ACCESS_GROUP_2]] = distinct !{}
+// CHECK: ![[ACCESS_GROUP_3]] = distinct !{}
+// CHECK: ![[INNER_LOOPID]] = distinct !{![[INNER_LOOPID]], {{.*}} ![[PARALLEL_ACCESSES_8:[0-9]+]]}
+// CHECK: ![[PARALLEL_ACCESSES_8]] = !{!"llvm.loop.parallel_accesses", ![[ACCESS_GROUP_3]]}
+// CHECK: ![[OUTER_LOOPID]] = distinct !{![[OUTER_LOOPID]], {{.*}} ![[PARALLEL_ACCESSES_10:[0-9]+]]}
+// CHECK: ![[PARALLEL_ACCESSES_10]] = !{!"llvm.loop.parallel_accesses", ![[ACCESS_GROUP_2]], ![[ACCESS_GROUP_3]]}
Index: test/CodeGenCXX/pragma-loop-safety-outer.cpp
===================================================================
--- test/CodeGenCXX/pragma-loop-safety-outer.cpp
+++ test/CodeGenCXX/pragma-loop-safety-outer.cpp
@@ -1,7 +1,7 @@
 // RUN: %clang_cc1 -triple x86_64-apple-darwin -std=c++11 -emit-llvm -o - %s | FileCheck %s
 
-// Verify that the inner access is tagged with a parallel_loop_access
-// for the outer loop.
+// Verify that the outer loop has the inner loop's access in its
+// llvm.loop.parallel_accesses property.
 void vectorize_outer_test(int *List, int Length) {
 #pragma clang loop vectorize(assume_safety) interleave(disable) unroll(disable)
   for (int i = 0; i < Length; i += 2) {
@@ -12,9 +12,11 @@
 }
 
 // CHECK: %[[MUL:.+]] = mul
-// CHECK: store i32 %[[MUL]], i32* %{{.+}}, !llvm.mem.parallel_loop_access ![[OUTER_LOOPID:[0-9]+]]
+// CHECK: store i32 %[[MUL]], i32* %{{.+}}, !llvm.access.group ![[ACCESS_GROUP_2:[0-9]+]]
 // CHECK: br label %{{.+}}, !llvm.loop ![[INNER_LOOPID:[0-9]+]]
-// CHECK: br label %{{.+}}, !llvm.loop ![[OUTER_LOOPID]]
+// CHECK: br label %{{.+}}, !llvm.loop ![[OUTER_LOOPID:[0-9]+]]
 
-// CHECK: ![[OUTER_LOOPID]] = distinct !{![[OUTER_LOOPID]],
+// CHECK: ![[ACCESS_GROUP_2]] = distinct !{}
 // CHECK: ![[INNER_LOOPID]] = distinct !{![[INNER_LOOPID]],
+// CHECK: ![[OUTER_LOOPID]] = distinct !{![[OUTER_LOOPID]], {{.*}} ![[PARALLEL_ACCESSES_9:[0-9]+]]}
+// CHECK: ![[PARALLEL_ACCESSES_9]] = !{!"llvm.loop.parallel_accesses", ![[ACCESS_GROUP_2]]}
Index: test/CodeGenCXX/pragma-loop-safety.cpp
===================================================================
--- test/CodeGenCXX/pragma-loop-safety.cpp
+++ test/CodeGenCXX/pragma-loop-safety.cpp
@@ -3,19 +3,19 @@
 // Verify assume_safety vectorization is recognized.
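+//
+// Rough before/after sketch of the tagging scheme (metadata numbers are
+// placeholders). Previously each access in an assume_safety loop carried
+//   store i32 %v, i32* %p, !llvm.mem.parallel_loop_access !2  ; !2 = LoopID
+// whereas it now carries
+//   store i32 %v, i32* %p, !llvm.access.group !4              ; !4 = distinct !{}
+// and the LoopID refers to !4 via a "llvm.loop.parallel_accesses" property.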
void vectorize_test(int *List, int Length) { // CHECK: define {{.*}} @_Z14vectorize_test -// CHECK: [[LOAD1_IV:.+]] = load i32, i32* [[IV1:[^,]+]], {{.*}}!llvm.mem.parallel_loop_access ![[LOOP1_ID:[0-9]+]] -// CHECK-NEXT: [[LOAD1_LEN:.+]] = load i32, i32* [[LEN1:.+]], {{.*}}!llvm.mem.parallel_loop_access ![[LOOP1_ID]] +// CHECK: [[LOAD1_IV:.+]] = load i32, i32* [[IV1:[^,]+]], {{.*}}!llvm.access.group ![[ACCESS_GROUP_2:[0-9]+]] +// CHECK-NEXT: [[LOAD1_LEN:.+]] = load i32, i32* [[LEN1:.+]], {{.*}}!llvm.access.group ![[ACCESS_GROUP_2]] // CHECK-NEXT: [[CMP1:.+]] = icmp slt i32[[LOAD1_IV]],[[LOAD1_LEN]] // CHECK-NEXT: br i1[[CMP1]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]] #pragma clang loop vectorize(assume_safety) interleave(disable) unroll(disable) for (int i = 0; i < Length; i++) { - // CHECK: [[RHIV1:.+]] = load i32, i32* [[IV1]], {{.*}}!llvm.mem.parallel_loop_access ![[LOOP1_ID]] + // CHECK: [[RHIV1:.+]] = load i32, i32* [[IV1]], {{.*}}!llvm.access.group ![[ACCESS_GROUP_2]] // CHECK-DAG: [[CALC1:.+]] = mul nsw i32[[RHIV1]], 2 - // CHECK-DAG: [[SIV1:.+]] = load i32, i32* [[IV1]]{{.*}}!llvm.mem.parallel_loop_access ![[LOOP1_ID]] + // CHECK-DAG: [[SIV1:.+]] = load i32, i32* [[IV1]]{{.*}}!llvm.access.group ![[ACCESS_GROUP_2]] // CHECK-DAG: [[INDEX1:.+]] = sext i32[[SIV1]] to i64 - // CHECK-DAG: [[ARRAY1:.+]] = load i32*, i32** [[LIST1:.*]], {{.*}}!llvm.mem.parallel_loop_access ![[LOOP1_ID]] + // CHECK-DAG: [[ARRAY1:.+]] = load i32*, i32** [[LIST1:.*]], {{.*}}!llvm.access.group ![[ACCESS_GROUP_2]] // CHECK-DAG: [[PTR1:.+]] = getelementptr inbounds i32, i32*[[ARRAY1]], i64[[INDEX1]] - // CHECK: store i32[[CALC1]], i32*[[PTR1]], {{.*}}!llvm.mem.parallel_loop_access ![[LOOP1_ID]] + // CHECK: store i32[[CALC1]], i32*[[PTR1]], {{.*}}!llvm.access.group ![[ACCESS_GROUP_2]] // CHECK-NEXT: br label [[LOOP1_INC:[^,]+]] List[i] = i * 2; @@ -26,19 +26,19 @@ // Verify assume_safety interleaving is recognized. 
void interleave_test(int *List, int Length) { // CHECK: define {{.*}} @_Z15interleave_test -// CHECK: [[LOAD2_IV:.+]] = load i32, i32* [[IV2:[^,]+]], {{.*}}!llvm.mem.parallel_loop_access ![[LOOP2_ID:[0-9]+]] -// CHECK-NEXT: [[LOAD2_LEN:.+]] = load i32, i32* [[LEN2:.+]], {{.*}}!llvm.mem.parallel_loop_access ![[LOOP2_ID]] +// CHECK: [[LOAD2_IV:.+]] = load i32, i32* [[IV2:[^,]+]], {{.*}}!llvm.access.group ![[ACCESS_GROUP_8:[0-9]+]] +// CHECK-NEXT: [[LOAD2_LEN:.+]] = load i32, i32* [[LEN2:.+]], {{.*}}!llvm.access.group ![[ACCESS_GROUP_8]] // CHECK-NEXT: [[CMP2:.+]] = icmp slt i32[[LOAD2_IV]],[[LOAD2_LEN]] // CHECK-NEXT: br i1[[CMP2]], label %[[LOOP2_BODY:[^,]+]], label %[[LOOP2_END:[^,]+]] #pragma clang loop interleave(assume_safety) vectorize(disable) unroll(disable) for (int i = 0; i < Length; i++) { - // CHECK: [[RHIV2:.+]] = load i32, i32* [[IV2]], {{.*}}!llvm.mem.parallel_loop_access ![[LOOP2_ID]] + // CHECK: [[RHIV2:.+]] = load i32, i32* [[IV2]], {{.*}}!llvm.access.group ![[ACCESS_GROUP_8]] // CHECK-DAG: [[CALC2:.+]] = mul nsw i32[[RHIV2]], 2 - // CHECK-DAG: [[SIV2:.+]] = load i32, i32* [[IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[LOOP2_ID]] + // CHECK-DAG: [[SIV2:.+]] = load i32, i32* [[IV2]]{{.*}}!llvm.access.group ![[ACCESS_GROUP_8]] // CHECK-DAG: [[INDEX2:.+]] = sext i32[[SIV2]] to i64 - // CHECK-DAG: [[ARRAY2:.+]] = load i32*, i32** [[LIST2:.*]], {{.*}}!llvm.mem.parallel_loop_access ![[LOOP2_ID]] + // CHECK-DAG: [[ARRAY2:.+]] = load i32*, i32** [[LIST2:.*]], {{.*}}!llvm.access.group ![[ACCESS_GROUP_8]] // CHECK-DAG: [[PTR2:.+]] = getelementptr inbounds i32, i32*[[ARRAY2]], i64[[INDEX2]] - // CHECK: store i32[[CALC2]], i32*[[PTR2]], {{.*}}!llvm.mem.parallel_loop_access ![[LOOP2_ID]] + // CHECK: store i32[[CALC2]], i32*[[PTR2]], {{.*}}!llvm.access.group ![[ACCESS_GROUP_8]] // CHECK-NEXT: br label [[LOOP2_INC:[^,]+]] List[i] = i * 2; @@ -46,9 +46,13 @@ } } -// CHECK: ![[LOOP1_HINTS]] = distinct !{![[LOOP1_HINTS]], ![[INTERLEAVE_1:[0-9]+]], ![[INTENABLE_1:[0-9]+]], ![[UNROLL_DISABLE:[0-9]+]]} +// CHECK: ![[ACCESS_GROUP_2]] = distinct !{} +// CHECK: ![[LOOP1_HINTS]] = distinct !{![[LOOP1_HINTS]], ![[INTERLEAVE_1:[0-9]+]], ![[INTENABLE_1:[0-9]+]], ![[UNROLL_DISABLE:[0-9]+]], ![[PARALLEL_ACCESSES_7:[0-9]+]]} // CHECK: ![[INTERLEAVE_1]] = !{!"llvm.loop.interleave.count", i32 1} // CHCCK: ![[INTENABLE_1]] = !{!"llvm.loop.vectorize.enable", i1 true} // CHECK: ![[UNROLL_DISABLE]] = !{!"llvm.loop.unroll.disable"} -// CHECK: ![[LOOP2_HINTS]] = distinct !{![[LOOP2_HINTS]], ![[WIDTH_1:[0-9]+]], ![[INTENABLE_1]], ![[UNROLL_DISABLE]]} +// CHECK: ![[PARALLEL_ACCESSES_7]] = !{!"llvm.loop.parallel_accesses", ![[ACCESS_GROUP_2]]} +// CHECK: ![[ACCESS_GROUP_8]] = distinct !{} +// CHECK: ![[LOOP2_HINTS]] = distinct !{![[LOOP2_HINTS]], ![[WIDTH_1:[0-9]+]], ![[INTENABLE_1]], ![[UNROLL_DISABLE]], ![[PARALLEL_ACCESSES_11:[0-9]+]]} // CHECK: ![[WIDTH_1]] = !{!"llvm.loop.vectorize.width", i32 1} +// CHECK: ![[PARALLEL_ACCESSES_11]] = !{!"llvm.loop.parallel_accesses", ![[ACCESS_GROUP_8]]} Index: test/OpenMP/for_codegen.cpp =================================================================== --- test/OpenMP/for_codegen.cpp +++ test/OpenMP/for_codegen.cpp @@ -73,7 +73,7 @@ // ... loop body ... 
// End of body: store into a[i]: // CHECK: store float [[RESULT:%.+]], float* {{%.+}} -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group a[i] = b[i] * c[i] * d[i]; // CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}} // CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1 @@ -114,7 +114,7 @@ // ... loop body ... // End of body: store into a[i]: // CHECK: store float [[RESULT:%.+]], float* {{%.+}} -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group a[i] = b[i] * c[i] * d[i]; // CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}} // CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1 @@ -163,7 +163,7 @@ // ... loop body ... // End of body: store into a[i]: // CHECK: store float [[RESULT:%.+]], float* {{%.+}} -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group a[i] = b[i] * c[i] * d[i]; // CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}} // CHECK-NEXT: [[ADD1_2:%.+]] = add i32 [[IV1_2]], 1 @@ -215,7 +215,7 @@ // CHECK-NEXT: store i64 [[CALC_I_2]], i64* [[LC_I:.+]] // ... loop body ... // End of body: store into a[i]: -// CHECK: store float [[RESULT:%.+]], float* {{%.+}}!llvm.mem.parallel_loop_access +// CHECK: store float [[RESULT:%.+]], float* {{%.+}}!llvm.access.group a[i] = b[i] * c[i] * d[i]; // CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]{{.*}} // CHECK-NEXT: [[ADD1_2:%.+]] = add i64 [[IV1_2]], 1 @@ -256,7 +256,7 @@ // CHECK-NEXT: store i64 [[CALC_I_2]], i64* [[LC_I:.+]] // ... loop body ... // End of body: store into a[i]: -// CHECK: store float [[RESULT:%.+]], float* {{%.+}}!llvm.mem.parallel_loop_access +// CHECK: store float [[RESULT:%.+]], float* {{%.+}}!llvm.access.group a[i] = b[i] * c[i] * d[i]; // CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]{{.*}} // CHECK-NEXT: [[ADD1_2:%.+]] = add i64 [[IV1_2]], 1 @@ -301,7 +301,7 @@ // ... loop body ... // End of body: store into a[i]: // CHECK: store float [[RESULT:%.+]], float* {{%.+}} -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group a[i] = b[i] * c[i] * d[i]; // CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]{{.*}} // CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i64 [[IV1_2]], 1 @@ -343,7 +343,7 @@ // ... loop body ... 
// End of body: store into a[i]: // CHECK: store float [[RESULT:%.+]], float* {{%.+}} -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group a[i] = b[i] * c[i] * d[i]; // CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}} // CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1 Index: test/OpenMP/for_simd_codegen.cpp =================================================================== --- test/OpenMP/for_simd_codegen.cpp +++ test/OpenMP/for_simd_codegen.cpp @@ -73,21 +73,21 @@ // CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]], // CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV2:%[^,]+]], -// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID:[0-9]+]] -// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] +// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.access.group +// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]{{.*}}!llvm.access.group // CHECK-NEXT: [[CMP2:%.+]] = icmp sle i32 [[IV2]], [[UB_VAL]] // CHECK-NEXT: br i1 [[CMP2]], label %[[SIMPLE_LOOP2_BODY:.+]], label %[[SIMPLE_LOOP2_END:[^,]+]] for (int i = 10; i > 1; i--) { // CHECK: [[SIMPLE_LOOP2_BODY]]: // Start of body: calculate i from IV: -// CHECK: [[IV2_0:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] +// CHECK: [[IV2_0:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.access.group // FIXME: It is interesting, why the following "mul 1" was not constant folded? // CHECK-NEXT: [[IV2_1:%.+]] = mul nsw i32 [[IV2_0]], 1 // CHECK-NEXT: [[LC_I_1:%.+]] = sub nsw i32 10, [[IV2_1]] -// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] +// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.access.group // -// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] -// CHECK-NEXT: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] +// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.access.group +// CHECK-NEXT: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.access.group // CHECK-NEXT: [[LIN_MUL1:%.+]] = mul nsw i32 [[IV2_2]], 3 // CHECK-NEXT: [[LIN_EXT1:%.+]] = sext i32 [[LIN_MUL1]] to i64 // CHECK-NEXT: [[LIN_ADD1:%.+]] = add nsw i64 [[LIN0_1]], [[LIN_EXT1]] @@ -95,9 +95,9 @@ // CHECK-NEXT: store i64 [[LIN_ADD1]], i64* [[K_PRIVATIZED:%[^,]+]] a[k]++; k = k + 3; -// CHECK: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] +// CHECK: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.access.group // CHECK-NEXT: [[ADD2_2:%.+]] = add nsw i32 [[IV2_2]], 1 -// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] +// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV2]]{{.*}}!llvm.access.group // br label {{.+}}, !llvm.loop ![[SIMPLE_LOOP2_ID]] } // CHECK: [[SIMPLE_LOOP2_END]]: Index: test/OpenMP/loops_explicit_clauses_codegen.cpp =================================================================== --- test/OpenMP/loops_explicit_clauses_codegen.cpp +++ test/OpenMP/loops_explicit_clauses_codegen.cpp @@ -39,18 +39,18 @@ ; foo(); // CHECK: @{{.+}}foo -// CHECK-NOT: @k{{.+}}!llvm.mem.parallel_loop_access -// CHECK: i32 @{{.+}}bar{{.+}}!llvm.mem.parallel_loop_access -// CHECK-NOT: @k{{.+}}!llvm.mem.parallel_loop_access +// CHECK-NOT: @k{{.+}}!llvm.access.group +// CHECK: i32 
@{{.+}}bar{{.+}}!llvm.access.group +// CHECK-NOT: @k{{.+}}!llvm.access.group // CHECK: sdiv i32 // CHECK: store i32 %{{.+}}, i32* @k, #pragma omp simd linear(k : 2) for (k = 0; k < argc; k++) bar(); // CHECK: @{{.+}}foo -// CHECK-NOT: @k{{.+}}!llvm.mem.parallel_loop_access -// CHECK: i32 @{{.+}}bar{{.+}}!llvm.mem.parallel_loop_access -// CHECK-NOT: @k{{.+}}!llvm.mem.parallel_loop_access +// CHECK-NOT: @k{{.+}}!llvm.access.group +// CHECK: i32 @{{.+}}bar{{.+}}!llvm.access.group +// CHECK-NOT: @k{{.+}}!llvm.access.group // CHECK: sdiv i32 // CHECK: store i32 %{{.+}}, i32* @k, foo(); @@ -60,9 +60,9 @@ bar() ; foo(); // CHECK: @{{.+}}foo -// CHECK-NOT: @k{{.+}}!llvm.mem.parallel_loop_access -// CHECK: i32 @{{.+}}bar{{.+}}!llvm.mem.parallel_loop_access -// CHECK-NOT: @k{{.+}}!llvm.mem.parallel_loop_access +// CHECK-NOT: @k{{.+}}!llvm.access.group +// CHECK: i32 @{{.+}}bar{{.+}}!llvm.access.group +// CHECK-NOT: @k{{.+}}!llvm.access.group // CHECK: sdiv i32 // CHECK: store i32 %{{.+}}, i32* @k, #pragma omp simd @@ -70,9 +70,9 @@ bar(); foo(); // CHECK: @{{.+}}foo -// CHECK-NOT: @k{{.+}}!llvm.mem.parallel_loop_access -// CHECK: i32 @{{.+}}bar{{.+}}!llvm.mem.parallel_loop_access -// CHECK-NOT: @k{{.+}}!llvm.mem.parallel_loop_access +// CHECK-NOT: @k{{.+}}!llvm.access.group +// CHECK: i32 @{{.+}}bar{{.+}}!llvm.access.group +// CHECK-NOT: @k{{.+}}!llvm.access.group // CHECK: sdiv i32 // CHECK: store i32 %{{.+}}, i32* @k, #pragma omp simd collapse(2) @@ -110,7 +110,7 @@ // CHECK: getelementptr inbounds %struct.S, %struct.S* %{{.+}}, i32 0, i32 0 // CHECK: br i1 // CHECK-NOT: getelementptr inbounds %struct.S, %struct.S* %{{.+}}, i32 0, i32 0 -// CHECK: i32 @{{.+}}bar{{.+}}!llvm.mem.parallel_loop_access +// CHECK: i32 @{{.+}}bar{{.+}}!llvm.access.group // CHECK-NOT: getelementptr inbounds %struct.S, %struct.S* %{{.+}}, i32 0, i32 0 // CHECK: add nsw i32 %{{.+}}, 1 // CHECK: br label {{.+}}, !llvm.loop @@ -123,7 +123,7 @@ // CHECK: getelementptr inbounds %struct.S, %struct.S* %{{.+}}, i32 0, i32 0 // CHECK: br i1 // CHECK-NOT: getelementptr inbounds %struct.S, %struct.S* %{{.+}}, i32 0, i32 0 -// CHECK: i32 @{{.+}}bar{{.+}}!llvm.mem.parallel_loop_access +// CHECK: i32 @{{.+}}bar{{.+}}!llvm.access.group // CHECK-NOT: getelementptr inbounds %struct.S, %struct.S* %{{.+}}, i32 0, i32 0 // CHECK: add nsw i64 %{{.+}}, 1 // CHECK: br label {{.+}}, !llvm.loop @@ -137,7 +137,7 @@ // CHECK-NOT: getelementptr inbounds %struct.S, %struct.S* %{{.+}}, i32 0, i32 0 // CHECK: br i1 // CHECK-NOT: getelementptr inbounds %struct.S, %struct.S* %{{.+}}, i32 0, i32 0 -// CHECK: i32 @{{.+}}bar{{.+}}!llvm.mem.parallel_loop_access +// CHECK: i32 @{{.+}}bar{{.+}}!llvm.access.group // CHECK-NOT: getelementptr inbounds %struct.S, %struct.S* %{{.+}}, i32 0, i32 0 // CHECK: add nsw i32 %{{.+}}, 1 // CHECK: br label {{.+}}, !llvm.loop @@ -150,7 +150,7 @@ // CHECK-NOT: getelementptr inbounds %struct.S, %struct.S* %{{.+}}, i32 0, i32 0 // CHECK: br i1 // CHECK-NOT: getelementptr inbounds %struct.S, %struct.S* %{{.+}}, i32 0, i32 0 -// CHECK: i32 @{{.+}}bar{{.+}}!llvm.mem.parallel_loop_access +// CHECK: i32 @{{.+}}bar{{.+}}!llvm.access.group // CHECK-NOT: getelementptr inbounds %struct.S, %struct.S* %{{.+}}, i32 0, i32 0 // CHECK: add nsw i64 %{{.+}}, 1 // CHECK: br label {{.+}}, !llvm.loop Index: test/OpenMP/ordered_codegen.cpp =================================================================== --- test/OpenMP/ordered_codegen.cpp +++ test/OpenMP/ordered_codegen.cpp @@ -44,7 +44,7 @@ // ... loop body ... 
// End of body: store into a[i]: // CHECK: store float [[RESULT:%.+]], float* {{%.+}} -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK-NEXT: call void @__kmpc_end_ordered([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]]) // ... end of ordered region ... #pragma omp ordered @@ -93,7 +93,7 @@ // ... loop body ... // End of body: store into a[i]: // CHECK: store float [[RESULT:%.+]], float* {{%.+}} -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK-NEXT: call void @__kmpc_end_ordered([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]]) // ... end of ordered region ... #pragma omp ordered threads @@ -147,7 +147,7 @@ // ... loop body ... // End of body: store into a[i]: // CHECK: store float [[RESULT:%.+]], float* {{%.+}} -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK-NEXT: call void @__kmpc_end_ordered([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]]) // ... end of ordered region ... #pragma omp ordered @@ -198,7 +198,7 @@ // ... loop body ... // End of body: store into a[i]: // CHECK: store float [[RESULT:%.+]], float* {{%.+}} -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK-NEXT: call void @__kmpc_end_ordered([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]]) // ... end of ordered region ... #pragma omp ordered threads @@ -220,8 +220,8 @@ float f[10]; // CHECK-LABEL: foo_simd void foo_simd(int low, int up) { - // CHECK: store float 0.000000e+00, float* %{{.+}}, align {{[0-9]+}}, !llvm.mem.parallel_loop_access ! - // CHECK-NEXT: call void [[CAP_FUNC:@.+]](i32* %{{.+}}), !llvm.mem.parallel_loop_access ! + // CHECK: store float 0.000000e+00, float* %{{.+}}, align {{[0-9]+}}, !llvm.access.group ! + // CHECK-NEXT: call void [[CAP_FUNC:@.+]](i32* %{{.+}}), !llvm.access.group ! #pragma omp simd for (int i = low; i < up; ++i) { f[i] = 0.0; Index: test/OpenMP/parallel_for_simd_codegen.cpp =================================================================== --- test/OpenMP/parallel_for_simd_codegen.cpp +++ test/OpenMP/parallel_for_simd_codegen.cpp @@ -83,21 +83,21 @@ // CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]], // CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV2:%[^,]+]], -// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID:[0-9]+]] -// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] +// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.access.group +// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]{{.*}}!llvm.access.group // CHECK-NEXT: [[CMP2:%.+]] = icmp sle i32 [[IV2]], [[UB_VAL]] // CHECK-NEXT: br i1 [[CMP2]], label %[[SIMPLE_LOOP2_BODY:.+]], label %[[SIMPLE_LOOP2_END:[^,]+]] for (int i = 10; i > 1; i--) { // CHECK: [[SIMPLE_LOOP2_BODY]]: // Start of body: calculate i from IV: -// CHECK: [[IV2_0:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] +// CHECK: [[IV2_0:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.access.group // FIXME: It is interesting, why the following "mul 1" was not constant folded? 
// CHECK-NEXT: [[IV2_1:%.+]] = mul nsw i32 [[IV2_0]], 1 // CHECK-NEXT: [[LC_I_1:%.+]] = sub nsw i32 10, [[IV2_1]] -// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] +// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.access.group // -// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] -// CHECK-NEXT: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] +// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.access.group +// CHECK-NEXT: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.access.group // CHECK-NEXT: [[LIN_MUL1:%.+]] = mul nsw i32 [[IV2_2]], 3 // CHECK-NEXT: [[LIN_EXT1:%.+]] = sext i32 [[LIN_MUL1]] to i64 // CHECK-NEXT: [[LIN_ADD1:%.+]] = add nsw i64 [[LIN0_1]], [[LIN_EXT1]] @@ -105,9 +105,9 @@ // CHECK-NEXT: store i64 [[LIN_ADD1]], i64* [[K_PRIVATIZED:%[^,]+]] a[k]++; k = k + 3; -// CHECK: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] +// CHECK: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.access.group // CHECK-NEXT: [[ADD2_2:%.+]] = add nsw i32 [[IV2_2]], 1 -// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] +// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV2]]{{.*}}!llvm.access.group // br label {{.+}}, !llvm.loop ![[SIMPLE_LOOP2_ID]] } // CHECK: [[SIMPLE_LOOP2_END]]: Index: test/OpenMP/schedule_codegen.cpp =================================================================== --- test/OpenMP/schedule_codegen.cpp +++ test/OpenMP/schedule_codegen.cpp @@ -5,191 +5,191 @@ int main() { // CHECK: @__kmpc_for_static_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_for_static_fini #pragma omp for for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_for_static_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_for_static_fini #pragma omp for simd for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_for_static_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_for_static_fini #pragma omp for schedule(static) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_for_static_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_for_static_fini #pragma omp for simd schedule(static) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_for_static_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_for_static_fini #pragma omp for schedule(static, 2) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_for_static_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_for_static_fini #pragma omp for simd schedule(static, 2) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK: !llvm.mem.parallel_loop_access +// CHECK: !llvm.access.group #pragma omp for schedule(auto) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK: !llvm.mem.parallel_loop_access +// CHECK: !llvm.access.group #pragma omp for simd schedule(auto) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK: !llvm.mem.parallel_loop_access +// CHECK: !llvm.access.group #pragma omp for schedule(runtime) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK: !llvm.mem.parallel_loop_access +// CHECK: 
!llvm.access.group #pragma omp for simd schedule(runtime) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK: !llvm.mem.parallel_loop_access +// CHECK: !llvm.access.group #pragma omp for schedule(guided) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK: !llvm.mem.parallel_loop_access +// CHECK: !llvm.access.group #pragma omp for simd schedule(guided) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK: !llvm.mem.parallel_loop_access +// CHECK: !llvm.access.group #pragma omp for schedule(dynamic) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK: !llvm.mem.parallel_loop_access +// CHECK: !llvm.access.group #pragma omp for simd schedule(dynamic) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_for_static_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_for_static_fini #pragma omp for schedule(monotonic: static) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_for_static_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_for_static_fini #pragma omp for simd schedule(monotonic: static) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_for_static_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_for_static_fini #pragma omp for schedule(monotonic: static, 2) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_for_static_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_for_static_fini #pragma omp for simd schedule(monotonic: static, 2) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group #pragma omp for schedule(monotonic: auto) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group #pragma omp for simd schedule(monotonic: auto) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group #pragma omp for schedule(monotonic: runtime) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group #pragma omp for simd schedule(monotonic: runtime) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group #pragma omp for schedule(monotonic: guided) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group #pragma omp for simd schedule(monotonic: guided) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group #pragma omp for schedule(monotonic: dynamic) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group #pragma omp for simd schedule(monotonic: dynamic) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK: !llvm.mem.parallel_loop_access +// CHECK: !llvm.access.group #pragma omp for schedule(nonmonotonic: guided) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK: !llvm.mem.parallel_loop_access +// CHECK: !llvm.access.group #pragma omp for simd schedule(nonmonotonic: guided) for(int i = 0; i < 10; ++i); // CHECK: 
@__kmpc_dispatch_init -// CHECK: !llvm.mem.parallel_loop_access +// CHECK: !llvm.access.group #pragma omp for schedule(nonmonotonic: dynamic) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK: !llvm.mem.parallel_loop_access +// CHECK: !llvm.access.group #pragma omp for simd schedule(nonmonotonic: dynamic) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_dispatch_next #pragma omp for schedule(static) ordered for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_dispatch_next #pragma omp for simd schedule(static) ordered for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_dispatch_next #pragma omp for schedule(static, 2) ordered(1) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_dispatch_next #pragma omp for simd schedule(static, 2) ordered for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_dispatch_next #pragma omp for schedule(auto) ordered(1) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group #pragma omp for simd schedule(auto) ordered for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_dispatch_next #pragma omp for schedule(runtime) ordered for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_dispatch_next #pragma omp for simd schedule(runtime) ordered for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_dispatch_next #pragma omp for schedule(guided) ordered(1) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_dispatch_next #pragma omp for simd schedule(guided) ordered for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_dispatch_next #pragma omp for schedule(dynamic) ordered(1) for(int i = 0; i < 10; ++i); // CHECK: @__kmpc_dispatch_init -// CHECK-NOT: !llvm.mem.parallel_loop_access +// CHECK-NOT: !llvm.access.group // CHECK: @__kmpc_dispatch_next #pragma omp for simd schedule(dynamic) for(int i = 0; i < 10; ++i); Index: test/OpenMP/simd_codegen.cpp =================================================================== --- test/OpenMP/simd_codegen.cpp +++ test/OpenMP/simd_codegen.cpp @@ -21,23 +21,23 @@ #pragma omp simd // CHECK: store i32 0, i32* [[OMP_IV:%[^,]+]] -// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID:[0-9]+]] +// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.access.group // CHECK-NEXT: [[CMP:%.+]] = icmp slt i32 [[IV]], 6 // CHECK-NEXT: br i1 [[CMP]], label %[[SIMPLE_LOOP1_BODY:.+]], label %[[SIMPLE_LOOP1_END:[^,]+]] for (int i = 3; i < 32; i += 5) 
{ // CHECK: [[SIMPLE_LOOP1_BODY]]: // Start of body: calculate i from IV: -// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]] +// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.access.group // CHECK: [[CALC_I_1:%.+]] = mul nsw i32 [[IV1_1]], 5 // CHECK-NEXT: [[CALC_I_2:%.+]] = add nsw i32 3, [[CALC_I_1]] -// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]] +// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]{{.*}}!llvm.access.group // ... loop body ... // End of body: store into a[i]: -// CHECK: store float [[RESULT:%.+]], float* {{%.+}}{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]] +// CHECK: store float [[RESULT:%.+]], float* {{%.+}}{{.*}}!llvm.access.group a[i] = b[i] * c[i] * d[i]; -// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]] +// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.access.group // CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1 -// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]] +// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]{{.*}}!llvm.access.group // br label %{{.+}}, !llvm.loop !{{.+}} } // CHECK: [[SIMPLE_LOOP1_END]]: @@ -51,20 +51,20 @@ // CHECK: [[K0LOAD:%.+]] = load i64, i64* [[K_VAR]] // CHECK-NEXT: store i64 [[K0LOAD]], i64* [[LIN0:%[^,]+]] -// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID:[0-9]+]] +// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.access.group // CHECK-NEXT: [[CMP2:%.+]] = icmp slt i32 [[IV2]], 9 // CHECK-NEXT: br i1 [[CMP2]], label %[[SIMPLE_LOOP2_BODY:.+]], label %[[SIMPLE_LOOP2_END:[^,]+]] for (int i = 10; i > 1; i--) { // CHECK: [[SIMPLE_LOOP2_BODY]]: // Start of body: calculate i from IV: -// CHECK: [[IV2_0:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] +// CHECK: [[IV2_0:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.access.group // FIXME: It is interesting, why the following "mul 1" was not constant folded? 
// CHECK-NEXT: [[IV2_1:%.+]] = mul nsw i32 [[IV2_0]], 1 // CHECK-NEXT: [[LC_I_1:%.+]] = sub nsw i32 10, [[IV2_1]] -// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] +// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.access.group // -// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] -// CHECK-NEXT: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] +// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.access.group +// CHECK-NEXT: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.access.group // CHECK-NEXT: [[LIN_MUL1:%.+]] = mul nsw i32 [[IV2_2]], 3 // CHECK-NEXT: [[LIN_EXT1:%.+]] = sext i32 [[LIN_MUL1]] to i64 // CHECK-NEXT: [[LIN_ADD1:%.+]] = add nsw i64 [[LIN0_1]], [[LIN_EXT1]] @@ -72,9 +72,9 @@ // CHECK-NEXT: store i64 [[LIN_ADD1]], i64* [[K_PRIVATIZED:%[^,]+]] a[k]++; k = k + 3; -// CHECK: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] +// CHECK: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.access.group // CHECK-NEXT: [[ADD2_2:%.+]] = add nsw i32 [[IV2_2]], 1 -// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]] +// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV2]]{{.*}}!llvm.access.group // br label {{.+}}, !llvm.loop ![[SIMPLE_LOOP2_ID]] } // CHECK: [[SIMPLE_LOOP2_END]]: @@ -101,35 +101,35 @@ // CHECK: [[GLIN_LOAD:%.+]] = load double*, double** [[GLIN_VAR:@[^,]+]] // CHECK-NEXT: store double* [[GLIN_LOAD]], double** [[GLIN_START:%[^,]+]] -// CHECK: [[IV3:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID:[0-9]+]] +// CHECK: [[IV3:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.access.group // CHECK-NEXT: [[CMP3:%.+]] = icmp ult i64 [[IV3]], 4 // CHECK-NEXT: br i1 [[CMP3]], label %[[SIMPLE_LOOP3_BODY:.+]], label %[[SIMPLE_LOOP3_END:[^,]+]] for (unsigned long long it = 2000; it >= 600; it-=400) { // CHECK: [[SIMPLE_LOOP3_BODY]]: // Start of body: calculate it from IV: -// CHECK: [[IV3_0:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]] +// CHECK: [[IV3_0:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.access.group // CHECK-NEXT: [[LC_IT_1:%.+]] = mul i64 [[IV3_0]], 400 // CHECK-NEXT: [[LC_IT_2:%.+]] = sub i64 2000, [[LC_IT_1]] -// CHECK-NEXT: store i64 [[LC_IT_2]], i64* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]] +// CHECK-NEXT: store i64 [[LC_IT_2]], i64* {{.+}}, !llvm.access.group // // Linear start and step are used to calculate current value of the linear variable. 
-// CHECK: [[LINSTART:.+]] = load i32, i32* [[LIN_START]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]] -// CHECK: [[LINSTEP:.+]] = load i64, i64* [[LIN_STEP]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]] -// CHECK-NOT: store i32 {{.+}}, i32* [[LIN_VAR]],{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]] -// CHECK: [[GLINSTART:.+]] = load double*, double** [[GLIN_START]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]] -// CHECK-NEXT: [[IV3_1:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]] +// CHECK: [[LINSTART:.+]] = load i32, i32* [[LIN_START]]{{.*}}!llvm.access.group +// CHECK: [[LINSTEP:.+]] = load i64, i64* [[LIN_STEP]]{{.*}}!llvm.access.group +// CHECK-NOT: store i32 {{.+}}, i32* [[LIN_VAR]],{{.*}}!llvm.access.group +// CHECK: [[GLINSTART:.+]] = load double*, double** [[GLIN_START]]{{.*}}!llvm.access.group +// CHECK-NEXT: [[IV3_1:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.access.group // CHECK-NEXT: [[MUL:%.+]] = mul i64 [[IV3_1]], 1 // CHECK: [[GEP:%.+]] = getelementptr{{.*}}[[GLINSTART]] -// CHECK-NEXT: store double* [[GEP]], double** [[G_PTR_CUR:%[^,]+]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]] +// CHECK-NEXT: store double* [[GEP]], double** [[G_PTR_CUR:%[^,]+]]{{.*}}!llvm.access.group *g_ptr++ = 0.0; -// CHECK: [[GEP_VAL:%.+]] = load double{{.*}}[[G_PTR_CUR]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]] -// CHECK: store double{{.*}}[[GEP_VAL]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]] +// CHECK: [[GEP_VAL:%.+]] = load double{{.*}}[[G_PTR_CUR]]{{.*}}!llvm.access.group +// CHECK: store double{{.*}}[[GEP_VAL]]{{.*}}!llvm.access.group a[it + lin]++; // CHECK: [[FLT_INC:%.+]] = fadd float -// CHECK-NEXT: store float [[FLT_INC]],{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]] -// CHECK: [[IV3_2:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]] +// CHECK-NEXT: store float [[FLT_INC]],{{.*}}!llvm.access.group +// CHECK: [[IV3_2:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.access.group // CHECK-NEXT: [[ADD3_2:%.+]] = add i64 [[IV3_2]], 1 -// CHECK-NEXT: store i64 [[ADD3_2]], i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]] +// CHECK-NEXT: store i64 [[ADD3_2]], i64* [[OMP_IV3]]{{.*}}!llvm.access.group } // CHECK: [[SIMPLE_LOOP3_END]]: // @@ -143,42 +143,42 @@ #pragma omp simd // CHECK: store i32 0, i32* [[OMP_IV4:%[^,]+]] -// CHECK: [[IV4:%.+]] = load i32, i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID:[0-9]+]] +// CHECK: [[IV4:%.+]] = load i32, i32* [[OMP_IV4]]{{.*}}!llvm.access.group // CHECK-NEXT: [[CMP4:%.+]] = icmp slt i32 [[IV4]], 4 // CHECK-NEXT: br i1 [[CMP4]], label %[[SIMPLE_LOOP4_BODY:.+]], label %[[SIMPLE_LOOP4_END:[^,]+]] for (short it = 6; it <= 20; it-=-4) { // CHECK: [[SIMPLE_LOOP4_BODY]]: // Start of body: calculate it from IV: -// CHECK: [[IV4_0:%.+]] = load i32, i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]] +// CHECK: [[IV4_0:%.+]] = load i32, i32* [[OMP_IV4]]{{.*}}!llvm.access.group // CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i32 [[IV4_0]], 4 // CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i32 6, [[LC_IT_1]] // CHECK-NEXT: [[LC_IT_3:%.+]] = trunc i32 [[LC_IT_2]] to i16 -// CHECK-NEXT: store i16 [[LC_IT_3]], i16* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]] +// CHECK-NEXT: store i16 [[LC_IT_3]], i16* {{.+}}, !llvm.access.group -// CHECK: [[IV4_2:%.+]] = load i32, i32* 
[[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]] +// CHECK: [[IV4_2:%.+]] = load i32, i32* [[OMP_IV4]]{{.*}}!llvm.access.group // CHECK-NEXT: [[ADD4_2:%.+]] = add nsw i32 [[IV4_2]], 1 -// CHECK-NEXT: store i32 [[ADD4_2]], i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]] +// CHECK-NEXT: store i32 [[ADD4_2]], i32* [[OMP_IV4]]{{.*}}!llvm.access.group } // CHECK: [[SIMPLE_LOOP4_END]]: #pragma omp simd // CHECK: store i32 0, i32* [[OMP_IV5:%[^,]+]] -// CHECK: [[IV5:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID:[0-9]+]] +// CHECK: [[IV5:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.access.group // CHECK-NEXT: [[CMP5:%.+]] = icmp slt i32 [[IV5]], 26 // CHECK-NEXT: br i1 [[CMP5]], label %[[SIMPLE_LOOP5_BODY:.+]], label %[[SIMPLE_LOOP5_END:[^,]+]] for (unsigned char it = 'z'; it >= 'a'; it+=-1) { // CHECK: [[SIMPLE_LOOP5_BODY]]: // Start of body: calculate it from IV: -// CHECK: [[IV5_0:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]] +// CHECK: [[IV5_0:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.access.group // CHECK-NEXT: [[IV5_1:%.+]] = mul nsw i32 [[IV5_0]], 1 // CHECK-NEXT: [[LC_IT_1:%.+]] = sub nsw i32 122, [[IV5_1]] // CHECK-NEXT: [[LC_IT_2:%.+]] = trunc i32 [[LC_IT_1]] to i8 -// CHECK-NEXT: store i8 [[LC_IT_2]], i8* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]] +// CHECK-NEXT: store i8 [[LC_IT_2]], i8* {{.+}}, !llvm.access.group -// CHECK: [[IV5_2:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]] +// CHECK: [[IV5_2:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.access.group // CHECK-NEXT: [[ADD5_2:%.+]] = add nsw i32 [[IV5_2]], 1 -// CHECK-NEXT: store i32 [[ADD5_2]], i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]] +// CHECK-NEXT: store i32 [[ADD5_2]], i32* [[OMP_IV5]]{{.*}}!llvm.access.group } // CHECK: [[SIMPLE_LOOP5_END]]: @@ -194,23 +194,23 @@ // CHECK: store i64 0, i64* [[OMP_IV7:%[^,]+]] // CHECK: br label %[[SIMD_LOOP7_COND:[^,]+]] // CHECK: [[SIMD_LOOP7_COND]]: -// CHECK-NEXT: [[IV7:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID:[0-9]+]] +// CHECK-NEXT: [[IV7:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.access.group // CHECK-NEXT: [[CMP7:%.+]] = icmp slt i64 [[IV7]], 7 // CHECK-NEXT: br i1 [[CMP7]], label %[[SIMPLE_LOOP7_BODY:.+]], label %[[SIMPLE_LOOP7_END:[^,]+]] for (long long i = -10; i < 10; i += 3) { // CHECK: [[SIMPLE_LOOP7_BODY]]: // Start of body: calculate i from IV: -// CHECK: [[IV7_0:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]] +// CHECK: [[IV7_0:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.access.group // CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i64 [[IV7_0]], 3 // CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i64 -10, [[LC_IT_1]] -// CHECK-NEXT: store i64 [[LC_IT_2]], i64* [[LC:%[^,]+]],{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]] -// CHECK-NEXT: [[LC_VAL:%.+]] = load i64, i64* [[LC]]{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]] +// CHECK-NEXT: store i64 [[LC_IT_2]], i64* [[LC:%[^,]+]],{{.+}}!llvm.access.group +// CHECK-NEXT: [[LC_VAL:%.+]] = load i64, i64* [[LC]]{{.+}}!llvm.access.group // CHECK-NEXT: [[CONV:%.+]] = trunc i64 [[LC_VAL]] to i32 -// CHECK-NEXT: store i32 [[CONV]], i32* [[A_PRIV:%[^,]+]],{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]] +// CHECK-NEXT: store i32 [[CONV]], i32* [[A_PRIV:%[^,]+]],{{.+}}!llvm.access.group A = i; -// 
CHECK: [[IV7_2:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]] +// CHECK: [[IV7_2:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.access.group // CHECK-NEXT: [[ADD7_2:%.+]] = add nsw i64 [[IV7_2]], 1 -// CHECK-NEXT: store i64 [[ADD7_2]], i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]] +// CHECK-NEXT: store i64 [[ADD7_2]], i64* [[OMP_IV7]]{{.*}}!llvm.access.group } // CHECK: [[SIMPLE_LOOP7_END]]: // CHECK-NEXT: store i64 11, i64* @@ -224,22 +224,22 @@ #pragma omp simd reduction(*:R) // CHECK: br label %[[SIMD_LOOP8_COND:[^,]+]] // CHECK: [[SIMD_LOOP8_COND]]: -// CHECK-NEXT: [[IV8:%.+]] = load i64, i64* [[OMP_IV8]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID:[0-9]+]] +// CHECK-NEXT: [[IV8:%.+]] = load i64, i64* [[OMP_IV8]]{{.*}}!llvm.access.group // CHECK-NEXT: [[CMP8:%.+]] = icmp slt i64 [[IV8]], 7 // CHECK-NEXT: br i1 [[CMP8]], label %[[SIMPLE_LOOP8_BODY:.+]], label %[[SIMPLE_LOOP8_END:[^,]+]] for (long long i = -10; i < 10; i += 3) { // CHECK: [[SIMPLE_LOOP8_BODY]]: // Start of body: calculate i from IV: -// CHECK: [[IV8_0:%.+]] = load i64, i64* [[OMP_IV8]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]] +// CHECK: [[IV8_0:%.+]] = load i64, i64* [[OMP_IV8]]{{.*}}!llvm.access.group // CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i64 [[IV8_0]], 3 // CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i64 -10, [[LC_IT_1]] -// CHECK-NEXT: store i64 [[LC_IT_2]], i64* [[LC:%[^,]+]],{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]] -// CHECK-NEXT: [[LC_VAL:%.+]] = load i64, i64* [[LC]]{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]] -// CHECK: store i32 %{{.+}}, i32* [[R_PRIV]],{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]] +// CHECK-NEXT: store i64 [[LC_IT_2]], i64* [[LC:%[^,]+]],{{.+}}!llvm.access.group +// CHECK-NEXT: [[LC_VAL:%.+]] = load i64, i64* [[LC]]{{.+}}!llvm.access.group +// CHECK: store i32 %{{.+}}, i32* [[R_PRIV]],{{.+}}!llvm.access.group R *= i; -// CHECK: [[IV8_2:%.+]] = load i64, i64* [[OMP_IV8]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]] +// CHECK: [[IV8_2:%.+]] = load i64, i64* [[OMP_IV8]]{{.*}}!llvm.access.group // CHECK-NEXT: [[ADD8_2:%.+]] = add nsw i64 [[IV8_2]], 1 -// CHECK-NEXT: store i64 [[ADD8_2]], i64* [[OMP_IV8]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]] +// CHECK-NEXT: store i64 [[ADD8_2]], i64* [[OMP_IV8]]{{.*}}!llvm.access.group } // CHECK: [[SIMPLE_LOOP8_END]]: // CHECK-DAG: [[R_VAL:%.+]] = load i32, i32* [[R]], @@ -266,26 +266,26 @@ // CHECK-LABEL: define {{.*i32}} @{{.*}}templ1{{.*}}(float {{.+}}, float* {{.+}}) // CHECK: store i64 0, i64* [[T1_OMP_IV:[^,]+]] // ... 
-// CHECK: [[IV:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID:[0-9]+]] +// CHECK: [[IV:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.access.group // CHECK-NEXT: [[CMP1:%.+]] = icmp slt i64 [[IV]], 16 // CHECK-NEXT: br i1 [[CMP1]], label %[[T1_BODY:.+]], label %[[T1_END:[^,]+]] // CHECK: [[T1_BODY]]: // Loop counters i and j updates: -// CHECK: [[IV1:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]] +// CHECK: [[IV1:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.access.group // CHECK-NEXT: [[I_1:%.+]] = sdiv i64 [[IV1]], 4 // CHECK-NEXT: [[I_1_MUL1:%.+]] = mul nsw i64 [[I_1]], 1 // CHECK-NEXT: [[I_1_ADD0:%.+]] = add nsw i64 0, [[I_1_MUL1]] // CHECK-NEXT: [[I_2:%.+]] = trunc i64 [[I_1_ADD0]] to i32 -// CHECK-NEXT: store i32 [[I_2]], i32* {{%.+}}{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]] -// CHECK: [[IV2:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]] +// CHECK-NEXT: store i32 [[I_2]], i32* {{%.+}}{{.*}}!llvm.access.group +// CHECK: [[IV2:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.access.group // CHECK-NEXT: [[J_1:%.+]] = srem i64 [[IV2]], 4 // CHECK-NEXT: [[J_2:%.+]] = mul nsw i64 [[J_1]], 2 // CHECK-NEXT: [[J_2_ADD0:%.+]] = add nsw i64 0, [[J_2]] -// CHECK-NEXT: store i64 [[J_2_ADD0]], i64* {{%.+}}{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]] +// CHECK-NEXT: store i64 [[J_2_ADD0]], i64* {{%.+}}{{.*}}!llvm.access.group // simd.for.inc: -// CHECK: [[IV3:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]] +// CHECK: [[IV3:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.access.group // CHECK-NEXT: [[INC:%.+]] = add nsw i64 [[IV3]], 1 -// CHECK-NEXT: store i64 [[INC]], i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]] +// CHECK-NEXT: store i64 [[INC]], i64* [[T1_OMP_IV]]{{.*}}!llvm.access.group // CHECK-NEXT: br label {{%.+}} // CHECK: [[T1_END]]: // CHECK: ret i32 0 @@ -338,15 +338,15 @@ // CHECK: store i32 0, i32* [[IT_OMP_IV:%[^,]+]] #pragma omp simd -// CHECK: [[IV:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}} !llvm.mem.parallel_loop_access ![[ITER_LOOP_ID:[0-9]+]] -// CHECK-NEXT: [[LAST_IT:%.+]] = load i32, i32* [[OMP_LAST_IT]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]] +// CHECK: [[IV:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}} !llvm.access.group +// CHECK-NEXT: [[LAST_IT:%.+]] = load i32, i32* [[OMP_LAST_IT]]{{.+}}!llvm.access.group // CHECK-NEXT: [[NUM_IT:%.+]] = add nsw i32 [[LAST_IT]], 1 // CHECK-NEXT: [[CMP:%.+]] = icmp slt i32 [[IV]], [[NUM_IT]] // CHECK-NEXT: br i1 [[CMP]], label %[[IT_BODY:[^,]+]], label %[[IT_END:[^,]+]] for (IterDouble i = ia; i < ib; ++i) { // CHECK: [[IT_BODY]]: // Start of body: calculate i from index: -// CHECK: [[IV1:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]] +// CHECK: [[IV1:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}}!llvm.access.group // Call of operator+ (i, IV). // CHECK: {{%.+}} = invoke {{.+}} @{{.*}}IterDouble{{.*}} // ... loop body ... @@ -354,12 +354,12 @@ // Float multiply and save result. 
// CHECK: [[MULR:%.+]] = fmul double {{%.+}}, 5.000000e-01 // CHECK-NEXT: invoke {{.+}} @{{.*}}IterDouble{{.*}} -// CHECK: store double [[MULR:%.+]], double* [[RESULT_ADDR:%.+]], !llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]] +// CHECK: store double [[MULR:%.+]], double* [[RESULT_ADDR:%.+]], !llvm.access.group ++ic; // -// CHECK: [[IV2:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]] +// CHECK: [[IV2:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}}!llvm.access.group // CHECK-NEXT: [[ADD2:%.+]] = add nsw i32 [[IV2]], 1 -// CHECK-NEXT: store i32 [[ADD2]], i32* [[IT_OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]] +// CHECK-NEXT: store i32 [[ADD2]], i32* [[IT_OMP_IV]]{{.+}}!llvm.access.group // br label %{{.*}}, !llvm.loop ![[ITER_LOOP_ID]] } // CHECK: [[IT_END]]: @@ -377,7 +377,7 @@ // #pragma omp simd collapse(4) -// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID:[0-9]+]] +// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.access.group // CHECK-NEXT: [[CMP:%.+]] = icmp ult i32 [[IV]], 120 // CHECK-NEXT: br i1 [[CMP]], label %[[COLL1_BODY:[^,]+]], label %[[COLL1_END:[^,]+]] for (i = 1; i < 3; i++) // 2 iterations @@ -387,25 +387,25 @@ { // CHECK: [[COLL1_BODY]]: // Start of body: calculate i from index: -// CHECK: [[IV1:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]] +// CHECK: [[IV1:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.access.group // Calculation of the loop counters values. // CHECK: [[CALC_I_1:%.+]] = udiv i32 [[IV1]], 60 // CHECK-NEXT: [[CALC_I_1_MUL1:%.+]] = mul i32 [[CALC_I_1]], 1 // CHECK-NEXT: [[CALC_I_2:%.+]] = add i32 1, [[CALC_I_1_MUL1]] // CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]] -// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]] +// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.access.group // CHECK-NEXT: [[CALC_J_1:%.+]] = udiv i32 [[IV1_2]], 20 // CHECK-NEXT: [[CALC_J_2:%.+]] = urem i32 [[CALC_J_1]], 3 // CHECK-NEXT: [[CALC_J_2_MUL1:%.+]] = mul i32 [[CALC_J_2]], 1 // CHECK-NEXT: [[CALC_J_3:%.+]] = add i32 2, [[CALC_J_2_MUL1]] // CHECK-NEXT: store i32 [[CALC_J_3]], i32* [[LC_J:.+]] -// CHECK: [[IV1_3:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]] +// CHECK: [[IV1_3:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.access.group // CHECK-NEXT: [[CALC_K_1:%.+]] = udiv i32 [[IV1_3]], 5 // CHECK-NEXT: [[CALC_K_2:%.+]] = urem i32 [[CALC_K_1]], 4 // CHECK-NEXT: [[CALC_K_2_MUL1:%.+]] = mul i32 [[CALC_K_2]], 1 // CHECK-NEXT: [[CALC_K_3:%.+]] = add i32 3, [[CALC_K_2_MUL1]] // CHECK-NEXT: store i32 [[CALC_K_3]], i32* [[LC_K:.+]] -// CHECK: [[IV1_4:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]] +// CHECK: [[IV1_4:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.access.group // CHECK-NEXT: [[CALC_L_1:%.+]] = urem i32 [[IV1_4]], 5 // CHECK-NEXT: [[CALC_L_1_MUL1:%.+]] = mul i32 [[CALC_L_1]], 1 // CHECK-NEXT: [[CALC_L_2:%.+]] = add i32 4, [[CALC_L_1_MUL1]] @@ -413,12 +413,12 @@ // CHECK-NEXT: store i16 [[CALC_L_3]], i16* [[LC_L:.+]] // ... loop body ... 
 // End of body: store into a[i]:
-// CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
+// CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]]{{.+}}!llvm.access.group
 float res = b[j] * c[k];
 a[i] = res * d[l];
-// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
+// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.access.group
 // CHECK-NEXT: [[ADD2:%.+]] = add i32 [[IV2]], 1
-// CHECK-NEXT: store i32 [[ADD2]], i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
+// CHECK-NEXT: store i32 [[ADD2]], i32* [[OMP_IV]]{{.*}}!llvm.access.group
 // br label %{{[^,]+}}, !llvm.loop ![[COLL1_LOOP_ID]]
 // CHECK: [[COLL1_END]]:
 }
@@ -445,8 +445,8 @@
 //
 #pragma omp simd collapse(2) private(globalfloat, localint)
-// CHECK: [[IV:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID:[0-9]+]]
-// CHECK-NEXT: [[LI:%.+]] = load i64, i64* [[OMP_LI:%[^,]+]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
+// CHECK: [[IV:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.access.group
+// CHECK-NEXT: [[LI:%.+]] = load i64, i64* [[OMP_LI:%[^,]+]]{{.+}}!llvm.access.group
 // CHECK-NEXT: [[NUMIT:%.+]] = add nsw i64 [[LI]], 1
 // CHECK-NEXT: [[CMP:%.+]] = icmp slt i64 [[IV]], [[NUMIT]]
 // CHECK-NEXT: br i1 [[CMP]], label %[[WIDE1_BODY:[^,]+]], label %[[WIDE1_END:[^,]+]]
@@ -455,10 +455,10 @@
 {
 // CHECK: [[WIDE1_BODY]]:
 // Start of body: calculate i from index:
-// CHECK: [[IV1:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
+// CHECK: [[IV1:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.access.group
 // Calculation of the loop counters values...
 // CHECK: store i32 {{[^,]+}}, i32* [[LC_I:.+]]
-// CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
+// CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.access.group
 // CHECK: store i16 {{[^,]+}}, i16* [[LC_J:.+]]
 // ... loop body ...
 //
@@ -467,14 +467,14 @@
 globalfloat = (float)j/i;
 float res = b[j] * c[j];
 // Store into a[i]:
-// CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
+// CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]]{{.+}}!llvm.access.group
 a[i] = res * d[i];
 // Then there's a store into private var localint:
-// CHECK: store i32 {{.+}}, i32* [[LOCALINT:%[^,]+]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
+// CHECK: store i32 {{.+}}, i32* [[LOCALINT:%[^,]+]]{{.+}}!llvm.access.group
 localint = (int)j;
-// CHECK: [[IV2:%.+]] = load i64, i64* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
+// CHECK: [[IV2:%.+]] = load i64, i64* [[OMP_IV]]{{.*}}!llvm.access.group
 // CHECK-NEXT: [[ADD2:%.+]] = add nsw i64 [[IV2]], 1
-// CHECK-NEXT: store i64 [[ADD2]], i64* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
+// CHECK-NEXT: store i64 [[ADD2]], i64* [[OMP_IV]]{{.*}}!llvm.access.group
 //
 // br label %{{[^,]+}}, !llvm.loop ![[WIDE1_LOOP_ID]]
 // CHECK: [[WIDE1_END]]:
@@ -505,20 +505,20 @@
 // CHECK: [[K0LOAD:%.+]] = load i64, i64* [[K_REF]]
 // CHECK-NEXT: store i64 [[K0LOAD]], i64* [[LIN0:%[^,]+]]
-// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID:[0-9]+]]
+// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.access.group
 // CHECK-NEXT: [[CMP2:%.+]] = icmp slt i32 [[IV]], 9
 // CHECK-NEXT: br i1 [[CMP2]], label %[[SIMPLE_LOOP_BODY:.+]], label %[[SIMPLE_LOOP_END:[^,]+]]
 for (int i = 10; i > 1; i--) {
 // CHECK: [[SIMPLE_LOOP_BODY]]:
 // Start of body: calculate i from IV:
-// CHECK: [[IV_0:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
+// CHECK: [[IV_0:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.access.group
 // FIXME: It is interesting, why the following "mul 1" was not constant folded?
 // CHECK-NEXT: [[IV_1:%.+]] = mul nsw i32 [[IV_0]], 1
 // CHECK-NEXT: [[LC_I_1:%.+]] = sub nsw i32 10, [[IV_1]]
-// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
+// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.access.group
 //
-// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
-// CHECK-NEXT: [[IV_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
+// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.access.group
+// CHECK-NEXT: [[IV_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.access.group
 // CHECK-NEXT: [[LIN_MUL1:%.+]] = mul nsw i32 [[IV_2]], 3
 // CHECK-NEXT: [[LIN_EXT1:%.+]] = sext i32 [[LIN_MUL1]] to i64
 // CHECK-NEXT: [[LIN_ADD1:%.+]] = add nsw i64 [[LIN0_1]], [[LIN_EXT1]]
@@ -526,9 +526,9 @@
 // CHECK-NEXT: store i64 [[LIN_ADD1]], i64* [[K_PRIVATIZED:%[^,]+]]
 a[k]++;
 k = k + 3;
-// CHECK: [[IV_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
+// CHECK: [[IV_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.access.group
 // CHECK-NEXT: [[ADD2_2:%.+]] = add nsw i32 [[IV_2]], 1
-// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
+// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV]]{{.*}}!llvm.access.group
 // br label {{.+}}, !llvm.loop ![[SIMPLE_LOOP_ID]]
 }
 // CHECK: [[SIMPLE_LOOP_END]]:
@@ -550,20 +550,20 @@
 // CHECK: [[K0LOAD:%.+]] = load i64, i64* [[K_REF]]
 // CHECK-NEXT: store i64 [[K0LOAD]], i64* [[LIN0:%[^,]+]]
-// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID:[0-9]+]]
+// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.access.group
 // CHECK-NEXT: [[CMP2:%.+]] = icmp slt i32 [[IV]], 9
 // CHECK-NEXT: br i1 [[CMP2]], label %[[SIMPLE_LOOP_BODY:.+]], label %[[SIMPLE_LOOP_END:[^,]+]]
 for (int i = 10; i > 1; i--) {
 // CHECK: [[SIMPLE_LOOP_BODY]]:
 // Start of body: calculate i from IV:
-// CHECK: [[IV_0:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
+// CHECK: [[IV_0:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.access.group
 // FIXME: It is interesting, why the following "mul 1" was not constant folded?
 // CHECK-NEXT: [[IV_1:%.+]] = mul nsw i32 [[IV_0]], 1
 // CHECK-NEXT: [[LC_I_1:%.+]] = sub nsw i32 10, [[IV_1]]
-// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
+// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.access.group
 //
-// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
-// CHECK-NEXT: [[IV_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
+// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.access.group
+// CHECK-NEXT: [[IV_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.access.group
 // CHECK-NEXT: [[LIN_MUL1:%.+]] = mul nsw i32 [[IV_2]], 3
 // CHECK-NEXT: [[LIN_EXT1:%.+]] = sext i32 [[LIN_MUL1]] to i64
 // CHECK-NEXT: [[LIN_ADD1:%.+]] = add nsw i64 [[LIN0_1]], [[LIN_EXT1]]
@@ -571,9 +571,9 @@
 // CHECK-NEXT: store i64 [[LIN_ADD1]], i64* [[K_PRIVATIZED:%[^,]+]]
 a[k]++;
 k = k + 3;
-// CHECK: [[IV_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
+// CHECK: [[IV_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.access.group
 // CHECK-NEXT: [[ADD2_2:%.+]] = add nsw i32 [[IV_2]], 1
-// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
+// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV]]{{.*}}!llvm.access.group
 // br label {{.+}}, !llvm.loop ![[SIMPLE_LOOP_ID]]
 }
 // CHECK: [[SIMPLE_LOOP_END]]:
@@ -591,20 +591,20 @@
 // CHECK: [[K0LOAD:%.+]] = load i64, i64* [[VAL_ADDR]]
 // CHECK-NEXT: store i64 [[K0LOAD]], i64* [[LIN0:%[^,]+]]
-// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID:[0-9]+]]
+// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.access.group
 // CHECK-NEXT: [[CMP2:%.+]] = icmp slt i32 [[IV]], 9
 // CHECK-NEXT: br i1 [[CMP2]], label %[[SIMPLE_LOOP_BODY:.+]], label %[[SIMPLE_LOOP_END:[^,]+]]
 for (int i = 10; i > 1; i--) {
 // CHECK: [[SIMPLE_LOOP_BODY]]:
 // Start of body: calculate i from IV:
-// CHECK: [[IV_0:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
+// CHECK: [[IV_0:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.access.group
 // FIXME: It is interesting, why the following "mul 1" was not constant folded?
 // CHECK-NEXT: [[IV_1:%.+]] = mul nsw i32 [[IV_0]], 1
 // CHECK-NEXT: [[LC_I_1:%.+]] = sub nsw i32 10, [[IV_1]]
-// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
+// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.access.group
 //
-// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
-// CHECK-NEXT: [[IV_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
+// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.access.group
+// CHECK-NEXT: [[IV_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.access.group
 // CHECK-NEXT: [[LIN_MUL1:%.+]] = mul nsw i32 [[IV_2]], 3
 // CHECK-NEXT: [[LIN_EXT1:%.+]] = sext i32 [[LIN_MUL1]] to i64
 // CHECK-NEXT: [[LIN_ADD1:%.+]] = add nsw i64 [[LIN0_1]], [[LIN_EXT1]]
@@ -612,9 +612,9 @@
 // CHECK-NEXT: store i64 [[LIN_ADD1]], i64* [[K_PRIVATIZED:%[^,]+]]
 a[k]++;
 k = k + 3;
-// CHECK: [[IV_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
+// CHECK: [[IV_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.access.group
 // CHECK-NEXT: [[ADD2_2:%.+]] = add nsw i32 [[IV_2]], 1
-// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP_ID]]
+// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV]]{{.*}}!llvm.access.group
 // br label {{.+}}, !llvm.loop ![[SIMPLE_LOOP_ID]]
 }
 // CHECK: [[SIMPLE_LOOP_END]]:
@@ -635,7 +635,7 @@
 #pragma omp simd
 // TERM_DEBUG-NOT: __kmpc_global_thread_num
 // TERM_DEBUG: invoke i32 {{.*}}bar{{.*}}()
- // TERM_DEBUG: unwind label %[[TERM_LPAD:.+]],
+ // TERM_DEBUG: unwind label %[[TERM_LPAD:[^,]+]],
 // TERM_DEBUG-NOT: __kmpc_global_thread_num
 // TERM_DEBUG: [[TERM_LPAD]]
 // TERM_DEBUG: call void @__clang_call_terminate
Index: test/OpenMP/simd_metadata.c
===================================================================
--- test/OpenMP/simd_metadata.c
+++ test/OpenMP/simd_metadata.c
@@ -49,8 +49,8 @@
 c[i] = a[i] * a[i] + b[i] * b[t];
 ++t;
 }
-// do not emit parallel_loop_access metadata due to usage of safelen clause.
-// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.mem.parallel_loop_access {{![0-9]+}}
+// do not emit llvm.access.group metadata due to usage of safelen clause.
+// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group {{![0-9]+}}
 #pragma omp simd safelen(16) linear(t) aligned(c:32) aligned(a,b) simdlen(8)
 // CHECK: [[C_PTRINT:%.+]] = ptrtoint
 // CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31
@@ -80,8 +80,8 @@
 c[i] = a[i] * a[i] + b[i] * b[t];
 ++t;
 }
-// do not emit parallel_loop_access metadata due to usage of safelen clause.
-// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.mem.parallel_loop_access {{![0-9]+}}
+// do not emit llvm.access.group metadata due to usage of safelen clause.
+// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group {{![0-9]+}}
 #pragma omp simd linear(t) aligned(c:32) aligned(a,b) simdlen(8)
 // CHECK: [[C_PTRINT:%.+]] = ptrtoint
 // CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31
@@ -110,7 +110,7 @@
 for (int i = 0; i < size; ++i) {
 c[i] = a[i] * a[i] + b[i] * b[t];
 ++t;
-// CHECK: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.mem.parallel_loop_access {{![0-9]+}}
+// CHECK: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group ![[ACCESS_GROUP_7:[0-9]+]]
 }
 }
@@ -122,8 +122,9 @@
 for (int i = 0; i < size; ++i) {
 c[i] = a[i] * a[i] + b[i] * b[t];
 ++t;
-// CHECK: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.mem.parallel_loop_access [[LOOP_H2_HEADER:![0-9]+]]
+// CHECK: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group ![[ACCESS_GROUP_10:[0-9]+]]
 }
+// CHECK: br label %{{.+}}, !llvm.loop [[LOOP_H2_HEADER:![0-9]+]]
 }
 void h3(float *c, float *a, float *b, int size)
@@ -134,9 +135,9 @@
 for (int j = 0; j < size; ++j) {
 c[j*i] = a[i] * b[j];
 }
+// CHECK: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group ![[ACCESS_GROUP_13:[0-9]+]]
 }
-// do not emit parallel_loop_access for nested loop.
-// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.mem.parallel_loop_access {{![0-9]+}}
+// CHECK: br label %{{.+}}, !llvm.loop [[LOOP_H3_HEADER:![0-9]+]]
 }
 // Metadata for h1:
@@ -145,11 +146,17 @@
 // CHECK: [[LOOP_VEC_ENABLE]] = !{!"llvm.loop.vectorize.enable", i1 true}
 // CHECK: [[LOOP_H1_HEADER:![0-9]+]] = distinct !{[[LOOP_H1_HEADER]], [[LOOP_WIDTH_8:![0-9]+]], [[LOOP_VEC_ENABLE]]}
 // CHECK: [[LOOP_WIDTH_8]] = !{!"llvm.loop.vectorize.width", i32 8}
-// CHECK: [[LOOP_H1_HEADER:![0-9]+]] = distinct !{[[LOOP_H1_HEADER]], [[LOOP_WIDTH_8]], [[LOOP_VEC_ENABLE]]}
+// CHECK: ![[ACCESS_GROUP_7]] = distinct !{}
+// CHECK: [[LOOP_H1_HEADER:![0-9]+]] = distinct !{[[LOOP_H1_HEADER]], [[LOOP_WIDTH_8]], [[LOOP_VEC_ENABLE]], ![[PARALLEL_ACCESSES_9:[0-9]+]]}
+// CHECK: ![[PARALLEL_ACCESSES_9]] = !{!"llvm.loop.parallel_accesses", ![[ACCESS_GROUP_7]]}
 //
 // Metadata for h2:
-// CHECK: [[LOOP_H2_HEADER]] = distinct !{[[LOOP_H2_HEADER]], [[LOOP_VEC_ENABLE]]}
+// CHECK: ![[ACCESS_GROUP_10]] = distinct !{}
+// CHECK: [[LOOP_H2_HEADER]] = distinct !{[[LOOP_H2_HEADER]], [[LOOP_VEC_ENABLE]], ![[PARALLEL_ACCESSES_12:[0-9]+]]}
+// CHECK: ![[PARALLEL_ACCESSES_12]] = !{!"llvm.loop.parallel_accesses", ![[ACCESS_GROUP_10]]}
 //
 // Metadata for h3:
-// CHECK: [[LOOP_H3_HEADER:![0-9]+]] = distinct !{[[LOOP_H3_HEADER]], [[LOOP_VEC_ENABLE]]}
+// CHECK: ![[ACCESS_GROUP_13]] = distinct !{}
+// CHECK: [[LOOP_H3_HEADER]] = distinct !{[[LOOP_H3_HEADER]], [[LOOP_VEC_ENABLE]], ![[PARALLEL_ACCESSES_15:[0-9]+]]}
+// CHECK: ![[PARALLEL_ACCESSES_15]] = !{!"llvm.loop.parallel_accesses", ![[ACCESS_GROUP_13]]}
 //
Index: test/OpenMP/target_parallel_for_simd_codegen.cpp
===================================================================
--- test/OpenMP/target_parallel_for_simd_codegen.cpp
+++ test/OpenMP/target_parallel_for_simd_codegen.cpp
@@ -367,7 +367,7 @@
 // CHECK-64: [[AA_CADDR:%.+]] = bitcast i[[SZ]]* [[AA_ADDR]] to i32*
 // CHECK-64: [[AA:%.+]] = load i32, i32* [[AA_CADDR]], align
 // CHECK-32: [[AA:%.+]] = load i32, i32* [[AA_ADDR]], align
-// CHECK: !llvm.mem.parallel_loop_access
+// CHECK: !llvm.access.group
 // CHECK: !llvm.loop
 // CHECK: ret void
 // CHECK-NEXT: }
Index: test/OpenMP/target_simd_codegen.cpp
===================================================================
--- test/OpenMP/target_simd_codegen.cpp
+++ test/OpenMP/target_simd_codegen.cpp
@@ -342,7 +342,7 @@
 // CHECK-64: [[AA_CADDR:%.+]] = bitcast i[[SZ]]* [[AA_ADDR]] to i32*
 // CHECK-64: [[AA:%.+]] = load i32, i32* [[AA_CADDR]], align
 // CHECK-32: [[AA:%.+]] = load i32, i32* [[AA_ADDR]], align
-// CHECK: !llvm.mem.parallel_loop_access
+// CHECK: !llvm.access.group
 // CHECK: !llvm.loop
 // CHECK: ret void
 // CHECK-NEXT: }
Index: test/OpenMP/taskloop_simd_codegen.cpp
===================================================================
--- test/OpenMP/taskloop_simd_codegen.cpp
+++ test/OpenMP/taskloop_simd_codegen.cpp
@@ -83,17 +83,17 @@
 // CHECK: [[LB_I32:%.+]] = trunc i64 [[LB_VAL]] to i32
 // CHECK: store i32 [[LB_I32]], i32* [[CNT:%.+]],
 // CHECK: br label
-// CHECK: [[VAL:%.+]] = load i32, i32* [[CNT]],{{.*}}!llvm.mem.parallel_loop_access [[LOOP1:!.+]]
+// CHECK: [[VAL:%.+]] = load i32, i32* [[CNT]],{{.*}}!llvm.access.group
 // CHECK: [[VAL_I64:%.+]] = sext i32 [[VAL]] to i64
-// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],{{.*}}!llvm.mem.parallel_loop_access [[LOOP1]]
+// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],{{.*}}!llvm.access.group
 // CHECK: [[CMP:%.+]] = icmp ule i64 [[VAL_I64]], [[UB_VAL]]
 // CHECK: br i1 [[CMP]], label %{{.+}}, label %{{.+}}
-// CHECK: load i32, i32* %{{.*}}!llvm.mem.parallel_loop_access [[LOOP1]]
-// CHECK: store i32 %{{.*}}!llvm.mem.parallel_loop_access [[LOOP1]]
-// CHECK: load i32, i32* %{{.*}}!llvm.mem.parallel_loop_access [[LOOP1]]
+// CHECK: load i32, i32* %{{.*}}!llvm.access.group
+// CHECK: store i32 %{{.*}}!llvm.access.group
+// CHECK: load i32, i32* %{{.*}}!llvm.access.group
 // CHECK: add nsw i32 %{{.+}}, 1
-// CHECK: store i32 %{{.+}}, i32* %{{.*}}!llvm.mem.parallel_loop_access [[LOOP1]]
-// CHECK: br label %{{.*}}!llvm.loop [[LOOP1]]
+// CHECK: store i32 %{{.+}}, i32* %{{.*}}!llvm.access.group
+// CHECK: br label %{{.*}}!llvm.loop
 // CHECK: ret i32 0
 // CHECK: define internal i32 [[TASK2]](
@@ -113,17 +113,17 @@
 // CHECK: [[LB_I32:%.+]] = trunc i64 [[LB_VAL]] to i32
 // CHECK: store i32 [[LB_I32]], i32* [[CNT:%.+]],
 // CHECK: br label
-// CHECK: [[VAL:%.+]] = load i32, i32* [[CNT]],{{.*}}!llvm.mem.parallel_loop_access [[LOOP2:!.+]]
+// CHECK: [[VAL:%.+]] = load i32, i32* [[CNT]],{{.*}}!llvm.access.group
 // CHECK: [[VAL_I64:%.+]] = sext i32 [[VAL]] to i64
-// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],{{.*}}!llvm.mem.parallel_loop_access [[LOOP2]]
+// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],{{.*}}!llvm.access.group
 // CHECK: [[CMP:%.+]] = icmp ule i64 [[VAL_I64]], [[UB_VAL]]
 // CHECK: br i1 [[CMP]], label %{{.+}}, label %{{.+}}
-// CHECK: load i32, i32* %{{.*}}!llvm.mem.parallel_loop_access [[LOOP2]]
-// CHECK: store i32 %{{.*}}!llvm.mem.parallel_loop_access [[LOOP2]]
-// CHECK: load i32, i32* %{{.*}}!llvm.mem.parallel_loop_access [[LOOP2]]
+// CHECK: load i32, i32* %{{.*}}!llvm.access.group
+// CHECK: store i32 %{{.*}}!llvm.access.group
+// CHECK: load i32, i32* %{{.*}}!llvm.access.group
 // CHECK: add nsw i32 %{{.+}}, 1
-// CHECK: store i32 %{{.+}}, i32* %{{.*}}!llvm.mem.parallel_loop_access [[LOOP2]]
-// CHECK: br label %{{.*}}!llvm.loop [[LOOP2]]
+// CHECK: store i32 %{{.+}}, i32* %{{.*}}!llvm.access.group
+// CHECK: br label %{{.*}}!llvm.loop
 // CHECK: ret i32 0
 // CHECK: define internal i32 [[TASK3]](
@@ -142,7 +142,7 @@
 // CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
 // CHECK: store i64 [[LB_VAL]], i64* [[CNT:%.+]],
 // CHECK: br label
-// CHECK-NOT: !llvm.mem.parallel_loop_access
+// CHECK-NOT: !llvm.access.group
 // CHECK: br label %{{.*}}!llvm.loop
 // CHECK: ret i32 0
@@ -192,14 +192,14 @@
 // CHECK: [[CMP:%.+]] = icmp ule i64 [[VAL_I64]], [[UB_VAL]]
 // CHECK: br i1 [[CMP]], label %{{.+}}, label %{{.+}}
 // CHECK: load i32, i32* %
-// CHECK-NOT: !llvm.mem.parallel_loop_access
+// CHECK-NOT: !llvm.access.group
 // CHECK: store i32 %
-// CHECK-NOT: !llvm.mem.parallel_loop_access
+// CHECK-NOT: !llvm.access.group
 // CHECK: load i32, i32* %
-// CHECK-NOT: !llvm.mem.parallel_loop_access
+// CHECK-NOT: !llvm.access.group
 // CHECK: add nsw i32 %{{.+}}, 1
 // CHECK: store i32 %{{.+}}, i32* %
-// CHECK-NOT: !llvm.mem.parallel_loop_access
+// CHECK-NOT: !llvm.access.group
 // CHECK: br label %{{.*}}!llvm.loop
 // CHECK: ret i32 0
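
Note (reviewer sketch, not part of the patch): the updated CHECK lines all expect the new access-group scheme. A minimal, hand-written LLVM IR example of that shape is given below; the function and value names are illustrative only and do not come from any test above.

define void @sketch(float* %a, i64 %n) {
entry:
  br label %loop
loop:
  %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
  %p = getelementptr inbounds float, float* %a, i64 %i
  ; each memory access inside the parallel loop carries its access group
  store float 0.000000e+00, float* %p, !llvm.access.group !2
  %i.next = add nuw nsw i64 %i, 1
  %c = icmp ult i64 %i.next, %n
  br i1 %c, label %loop, label %exit, !llvm.loop !0
exit:
  ret void
}

; the loop ID lists the access group under llvm.loop.parallel_accesses;
; for nested parallel loops the outer loop ID lists the inner groups as well
!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.parallel_accesses", !2}
!2 = distinct !{}

This replaces the old convention of tagging every access with !llvm.mem.parallel_loop_access pointing at the loop IDs themselves, which is why the CHECK lines above drop the loop-ID argument after !llvm.access.group.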