diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -2460,10 +2460,14 @@
   if (isa<GetElementPtrInst>(CurInst))
     return false;
 
-  // We don't currently value number ANY inline asm calls.
-  if (auto *CallB = dyn_cast<CallBase>(CurInst))
+  if (auto *CallB = dyn_cast<CallBase>(CurInst)) {
+    // We don't currently value number ANY inline asm calls.
     if (CallB->isInlineAsm())
       return false;
+    // Don't do PRE on convergent calls.
+    if (CallB->isConvergent())
+      return false;
+  }
 
   uint32_t ValNo = VN.lookup(CurInst);
 
diff --git a/llvm/test/Transforms/GVN/pre-skip-convergent.ll b/llvm/test/Transforms/GVN/pre-skip-convergent.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/GVN/pre-skip-convergent.ll
@@ -0,0 +1,36 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -gvn -o - %s | FileCheck %s
+; RUN: opt -S -passes=gvn -o - %s | FileCheck %s
+
+define i32 @foo(i1 %cond, i32* %q, i32* %p) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[V0:%.*]] = call i32 @llvm.convergent(i32 0)
+; CHECK-NEXT:    store i32 [[V0]], i32* [[Q:%.*]], align 4
+; CHECK-NEXT:    br i1 [[COND:%.*]], label [[PRE:%.*]], label [[MERGE:%.*]]
+; CHECK:       pre:
+; CHECK-NEXT:    [[T0:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    br label [[MERGE]]
+; CHECK:       merge:
+; CHECK-NEXT:    [[M0:%.*]] = phi i32 [ [[T0]], [[PRE]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[R0:%.*]] = call i32 @llvm.convergent(i32 [[M0]])
+; CHECK-NEXT:    ret i32 [[R0]]
+;
+entry:
+  %v0 = call i32 @llvm.convergent(i32 0)
+  store i32 %v0, i32* %q
+  br i1 %cond, label %pre, label %merge
+
+pre:
+  %t0 = load i32, i32* %p
+  br label %merge
+
+merge:
+  %m0 = phi i32 [ %t0, %pre ], [ 0, %entry ]
+  %r0 = call i32 @llvm.convergent(i32 %m0)
+  ret i32 %r0
+}
+
+declare i32 @llvm.convergent(i32) #0
+
+attributes #0 = { convergent nounwind readnone }