Index: lib/CodeGen/CGStmt.cpp
===================================================================
--- lib/CodeGen/CGStmt.cpp
+++ lib/CodeGen/CGStmt.cpp
@@ -2149,10 +2149,11 @@
                                           llvm::ConstantAsMetadata::get(Loc)));
   }
 
-  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
-    // Conservatively, mark all inline asm blocks in CUDA as convergent
-    // (meaning, they may call an intrinsically convergent op, such as bar.sync,
-    // and so can't have certain optimizations applied around them).
+  if (getLangOpts().assumeFunctionsAreConvergent()) {
+    // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
+    // convergent (meaning, they may call an intrinsically convergent op, such
+    // as bar.sync, and so can't have certain optimizations applied around
+    // them).
     Result->addAttribute(llvm::AttributeList::FunctionIndex,
                          llvm::Attribute::Convergent);
   }
Index: test/CodeGenOpenCL/convergent.cl
===================================================================
--- test/CodeGenOpenCL/convergent.cl
+++ test/CodeGenOpenCL/convergent.cl
@@ -126,6 +126,13 @@
 
 // CHECK: declare spir_func void @nodupfun(){{[^#]*}} #[[attr3:[0-9]+]]
 
+// CHECK-LABEL: @assume_convergent_asm
+// CHECK: tail call void asm sideeffect "s_barrier", ""() #4
+kernel void assume_convergent_asm()
+{
+  __asm__ volatile("s_barrier");
+}
+
 // CHECK: attributes #0 = { noinline norecurse nounwind "
 // CHECK: attributes #1 = { {{[^}]*}}convergent{{[^}]*}} }
 // CHECK: attributes #2 = { {{[^}]*}}convergent{{[^}]*}} }