Index: include/llvm/CodeGen/AtomicExpandUtils.h
===================================================================
--- include/llvm/CodeGen/AtomicExpandUtils.h
+++ include/llvm/CodeGen/AtomicExpandUtils.h
@@ -53,5 +53,6 @@
 ///
 /// Returns true if the containing function was modified.
 bool
-expandAtomicRMWToCmpXchg(AtomicRMWInst *AI, CreateCmpXchgInstFun Factory);
+expandAtomicRMWToCmpXchg(AtomicRMWInst *AI, CreateCmpXchgInstFun Factory,
+                         bool IsRelaxed = false);
 }
Index: lib/CodeGen/AtomicExpandPass.cpp
===================================================================
--- lib/CodeGen/AtomicExpandPass.cpp
+++ lib/CodeGen/AtomicExpandPass.cpp
@@ -251,7 +251,8 @@
     return expandAtomicRMWToLLSC(AI);
   }
   case TargetLoweringBase::AtomicRMWExpansionKind::CmpXChg: {
-    return expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
+    return expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun,
+                                    /*IsRelaxed=*/false);
   }
   }
   llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
@@ -512,7 +513,8 @@
 }
 
 bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
-                                    CreateCmpXchgInstFun CreateCmpXchg) {
+                                    CreateCmpXchgInstFun CreateCmpXchg,
+                                    bool IsRelaxed) {
   assert(AI);
 
   AtomicOrdering MemOpOrder =
@@ -551,6 +553,11 @@
   LoadInst *InitLoaded = Builder.CreateLoad(Addr);
   // Atomics require at least natural alignment.
   InitLoaded->setAlignment(AI->getType()->getPrimitiveSizeInBits() / 8);
+  // When the caller requests it (presumably for targets with a relaxed
+  // memory model), make the initial racy read an atomic load so it is
+  // well-defined. FIXME(review): confirm seq_cst is really intended here;
+  // a Monotonic ordering already avoids the data race at lower cost.
+  if (IsRelaxed)
+    InitLoaded->setOrdering(SequentiallyConsistent);
   Builder.CreateBr(LoopBB);
 
   // Start the main loop block now that we've taken care of the preliminaries.