Index: cfe/trunk/lib/CodeGen/CGAtomic.cpp
===================================================================
--- cfe/trunk/lib/CodeGen/CGAtomic.cpp
+++ cfe/trunk/lib/CodeGen/CGAtomic.cpp
@@ -221,11 +221,13 @@
     /// \param IsWeak true if atomic operation is weak, false otherwise.
     /// \returns Pair of values: previous value from storage (value type) and
     /// boolean flag (i1 type) with true if success and false otherwise.
-    std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
-        RValue Expected, RValue Desired,
-        llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
-        llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
-        bool IsWeak = false);
+    std::pair<RValue, llvm::Value *>
+    EmitAtomicCompareExchange(RValue Expected, RValue Desired,
+                              llvm::AtomicOrdering Success =
+                                  llvm::AtomicOrdering::SequentiallyConsistent,
+                              llvm::AtomicOrdering Failure =
+                                  llvm::AtomicOrdering::SequentiallyConsistent,
+                              bool IsWeak = false);
 
     /// \brief Emits atomic update.
     /// \param AO Atomic ordering.
@@ -260,13 +262,17 @@
     /// \brief Emits atomic compare-and-exchange op as a libcall.
     llvm::Value *EmitAtomicCompareExchangeLibcall(
         llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
-        llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
-        llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent);
+        llvm::AtomicOrdering Success =
+            llvm::AtomicOrdering::SequentiallyConsistent,
+        llvm::AtomicOrdering Failure =
+            llvm::AtomicOrdering::SequentiallyConsistent);
 
     /// \brief Emits atomic compare-and-exchange op as LLVM instruction.
     std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
         llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
-        llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
-        llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
+        llvm::AtomicOrdering Success =
+            llvm::AtomicOrdering::SequentiallyConsistent,
+        llvm::AtomicOrdering Failure =
+            llvm::AtomicOrdering::SequentiallyConsistent,
         bool IsWeak = false);
 
     /// \brief Emit atomic update as libcalls.
     void
@@ -289,17 +295,17 @@
 AtomicExpr::AtomicOrderingKind
 AtomicInfo::translateAtomicOrdering(const llvm::AtomicOrdering AO) {
   switch (AO) {
-  case llvm::Unordered:
-  case llvm::NotAtomic:
-  case llvm::Monotonic:
+  case llvm::AtomicOrdering::Unordered:
+  case llvm::AtomicOrdering::NotAtomic:
+  case llvm::AtomicOrdering::Monotonic:
     return AtomicExpr::AO_ABI_memory_order_relaxed;
-  case llvm::Acquire:
+  case llvm::AtomicOrdering::Acquire:
     return AtomicExpr::AO_ABI_memory_order_acquire;
-  case llvm::Release:
+  case llvm::AtomicOrdering::Release:
     return AtomicExpr::AO_ABI_memory_order_release;
-  case llvm::AcquireRelease:
+  case llvm::AtomicOrdering::AcquireRelease:
     return AtomicExpr::AO_ABI_memory_order_acq_rel;
-  case llvm::SequentiallyConsistent:
+  case llvm::AtomicOrdering::SequentiallyConsistent:
     return AtomicExpr::AO_ABI_memory_order_seq_cst;
   }
   llvm_unreachable("Unhandled AtomicOrdering");
@@ -431,14 +437,14 @@
   if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
     switch (FO->getSExtValue()) {
     default:
-      FailureOrder = llvm::Monotonic;
+      FailureOrder = llvm::AtomicOrdering::Monotonic;
       break;
     case AtomicExpr::AO_ABI_memory_order_consume:
     case AtomicExpr::AO_ABI_memory_order_acquire:
-      FailureOrder = llvm::Acquire;
+      FailureOrder = llvm::AtomicOrdering::Acquire;
       break;
     case AtomicExpr::AO_ABI_memory_order_seq_cst:
-      FailureOrder = llvm::SequentiallyConsistent;
+      FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
       break;
     }
     if (FailureOrder >= SuccessOrder) {
@@ -455,9 +461,10 @@
   llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                    *SeqCstBB = nullptr;
   MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
-  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
+  if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
+      SuccessOrder != llvm::AtomicOrdering::Release)
     AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
-  if (SuccessOrder == llvm::SequentiallyConsistent)
+  if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
     SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
   llvm::BasicBlock *ContBB =
       CGF.createBasicBlock("atomic.continue", CGF.CurFn);
@@ -471,13 +478,13 @@
   // doesn't fold to a constant for the ordering.
   CGF.Builder.SetInsertPoint(MonotonicBB);
   emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
-                    Size, SuccessOrder, llvm::Monotonic);
+                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic);
   CGF.Builder.CreateBr(ContBB);
 
   if (AcquireBB) {
     CGF.Builder.SetInsertPoint(AcquireBB);
     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
-                      Size, SuccessOrder, llvm::Acquire);
+                      Size, SuccessOrder, llvm::AtomicOrdering::Acquire);
     CGF.Builder.CreateBr(ContBB);
     SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                 AcquireBB);
@@ -486,8 +493,8 @@
   }
   if (SeqCstBB) {
     CGF.Builder.SetInsertPoint(SeqCstBB);
-    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
-                      Size, SuccessOrder, llvm::SequentiallyConsistent);
+    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
+                      llvm::AtomicOrdering::SequentiallyConsistent);
     CGF.Builder.CreateBr(ContBB);
     SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                 SeqCstBB);
@@ -1040,30 +1047,30 @@
     switch (ord) {
     case AtomicExpr::AO_ABI_memory_order_relaxed:
       EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
-                   Size, llvm::Monotonic);
+                   Size, llvm::AtomicOrdering::Monotonic);
       break;
     case AtomicExpr::AO_ABI_memory_order_consume:
     case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
-                   Size, llvm::Acquire);
+                   Size, llvm::AtomicOrdering::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
-                   Size, llvm::Release);
+                   Size, llvm::AtomicOrdering::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
-                   Size, llvm::AcquireRelease);
+                   Size, llvm::AtomicOrdering::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
-                   Size, llvm::SequentiallyConsistent);
+                   Size, llvm::AtomicOrdering::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
@@ -1104,12 +1111,12 @@
   // Emit all the different atomics
   Builder.SetInsertPoint(MonotonicBB);
   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
-               Size, llvm::Monotonic);
+               Size, llvm::AtomicOrdering::Monotonic);
   Builder.CreateBr(ContBB);
   if (!IsStore) {
     Builder.SetInsertPoint(AcquireBB);
     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
-                 Size, llvm::Acquire);
+                 Size, llvm::AtomicOrdering::Acquire);
     Builder.CreateBr(ContBB);
     SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                 AcquireBB);
@@ -1119,7 +1126,7 @@
   if (!IsLoad) {
     Builder.SetInsertPoint(ReleaseBB);
     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
-                 Size, llvm::Release);
+                 Size, llvm::AtomicOrdering::Release);
     Builder.CreateBr(ContBB);
     SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                 ReleaseBB);
@@ -1127,14 +1134,14 @@
   if (!IsLoad && !IsStore) {
     Builder.SetInsertPoint(AcqRelBB);
     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
-                 Size, llvm::AcquireRelease);
+                 Size, llvm::AtomicOrdering::AcquireRelease);
     Builder.CreateBr(ContBB);
     SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                 AcqRelBB);
   }
   Builder.SetInsertPoint(SeqCstBB);
   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
-               Size, llvm::SequentiallyConsistent);
+               Size, llvm::AtomicOrdering::SequentiallyConsistent);
   Builder.CreateBr(ContBB);
   SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
               SeqCstBB);
@@ -1318,9 +1325,9 @@
   llvm::AtomicOrdering AO;
   bool IsVolatile = LV.isVolatileQualified();
   if (LV.getType()->isAtomicType()) {
-    AO = llvm::SequentiallyConsistent;
+    AO = llvm::AtomicOrdering::SequentiallyConsistent;
   } else {
-    AO = llvm::Acquire;
+    AO = llvm::AtomicOrdering::Acquire;
     IsVolatile = true;
   }
   return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
@@ -1739,9 +1746,9 @@
   bool IsVolatile = lvalue.isVolatileQualified();
   llvm::AtomicOrdering AO;
   if (lvalue.getType()->isAtomicType()) {
-    AO = llvm::SequentiallyConsistent;
+    AO = llvm::AtomicOrdering::SequentiallyConsistent;
   } else {
-    AO = llvm::Release;
+    AO = llvm::AtomicOrdering::Release;
     IsVolatile = true;
   }
   return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
Index: cfe/trunk/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- cfe/trunk/lib/CodeGen/CGBuiltin.cpp
+++ cfe/trunk/lib/CodeGen/CGBuiltin.cpp
@@ -105,9 +105,8 @@
   llvm::Type *ValueType = Args[1]->getType();
   Args[1] = EmitToInt(CGF, Args[1], T, IntType);
 
-  llvm::Value *Result =
-      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
-                                  llvm::SequentiallyConsistent);
+  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
+      Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
   return EmitFromInt(CGF, Result, T, ValueType);
 }
 
@@ -167,9 +166,8 @@
   Args[1] = EmitToInt(CGF, Args[1], T, IntType);
   Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
 
-  llvm::Value *Result =
-      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
-                                  llvm::SequentiallyConsistent);
+  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
+      Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
   Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
   if (Invert)
     Result = CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
@@ -206,9 +204,9 @@
   Args[1] = EmitToInt(CGF, Args[1], T, IntType);
   Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
 
-  Value *Pair = CGF.Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
-                                                llvm::SequentiallyConsistent,
-                                                llvm::SequentiallyConsistent);
+  Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
+      Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent,
+      llvm::AtomicOrdering::SequentiallyConsistent);
   if (ReturnBool)
     // Extract boolean success flag and zext it to int.
     return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
@@ -1295,7 +1293,7 @@
     llvm::StoreInst *Store =
       Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy),
                                  Ptr, StoreSize);
-    Store->setAtomic(llvm::Release);
+    Store->setAtomic(llvm::AtomicOrdering::Release);
     return RValue::get(nullptr);
   }
 
@@ -1307,7 +1305,7 @@
     // any way to safely use it... but in practice, it mostly works
     // to use it with non-atomic loads and stores to get acquire/release
    // semantics.
-    Builder.CreateFence(llvm::SequentiallyConsistent);
+    Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
     return RValue::get(nullptr);
   }
 
@@ -1355,30 +1353,27 @@
     switch (ord) {
     case 0:  // memory_order_relaxed
     default: // invalid order
-      Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
-                                       Ptr, NewVal,
-                                       llvm::Monotonic);
+      Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
+                                       llvm::AtomicOrdering::Monotonic);
       break;
-    case 1:  // memory_order_consume
-    case 2:  // memory_order_acquire
-      Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
-                                       Ptr, NewVal,
-                                       llvm::Acquire);
+    case 1: // memory_order_consume
+    case 2: // memory_order_acquire
+      Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
+                                       llvm::AtomicOrdering::Acquire);
       break;
-    case 3:  // memory_order_release
-      Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
-                                       Ptr, NewVal,
-                                       llvm::Release);
+    case 3: // memory_order_release
+      Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
+                                       llvm::AtomicOrdering::Release);
       break;
-    case 4:  // memory_order_acq_rel
-      Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
-                                       Ptr, NewVal,
-                                       llvm::AcquireRelease);
+    case 4: // memory_order_acq_rel
+
+      Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
+                                       llvm::AtomicOrdering::AcquireRelease);
       break;
-    case 5:  // memory_order_seq_cst
-      Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
-                                       Ptr, NewVal,
-                                       llvm::SequentiallyConsistent);
+    case 5: // memory_order_seq_cst
+      Result = Builder.CreateAtomicRMW(
+          llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
+          llvm::AtomicOrdering::SequentiallyConsistent);
       break;
     }
     Result->setVolatile(Volatile);
@@ -1395,9 +1390,9 @@
       createBasicBlock("seqcst", CurFn)
     };
     llvm::AtomicOrdering Orders[5] = {
-      llvm::Monotonic, llvm::Acquire, llvm::Release,
-      llvm::AcquireRelease, llvm::SequentiallyConsistent
-    };
+        llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
+        llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
+        llvm::AtomicOrdering::SequentiallyConsistent};
 
     Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
     llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
@@ -1441,13 +1436,13 @@
     switch (ord) {
     case 0:  // memory_order_relaxed
     default: // invalid order
-      Store->setOrdering(llvm::Monotonic);
+      Store->setOrdering(llvm::AtomicOrdering::Monotonic);
      break;
    case 3:  // memory_order_release
-      Store->setOrdering(llvm::Release);
+      Store->setOrdering(llvm::AtomicOrdering::Release);
      break;
    case 5:  // memory_order_seq_cst
-      Store->setOrdering(llvm::SequentiallyConsistent);
+      Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
      break;
    }
    return RValue::get(nullptr);
@@ -1461,8 +1456,8 @@
       createBasicBlock("seqcst", CurFn)
     };
     llvm::AtomicOrdering Orders[3] = {
-      llvm::Monotonic, llvm::Release, llvm::SequentiallyConsistent
-    };
+        llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
+        llvm::AtomicOrdering::SequentiallyConsistent};
 
     Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
     llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
@@ -1501,16 +1496,17 @@
       break;
     case 1:  // memory_order_consume
     case 2:  // memory_order_acquire
-      Builder.CreateFence(llvm::Acquire, Scope);
+      Builder.CreateFence(llvm::AtomicOrdering::Acquire, Scope);
       break;
     case 3:  // memory_order_release
-      Builder.CreateFence(llvm::Release, Scope);
+      Builder.CreateFence(llvm::AtomicOrdering::Release, Scope);
       break;
     case 4:  // memory_order_acq_rel
-      Builder.CreateFence(llvm::AcquireRelease, Scope);
+      Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, Scope);
       break;
     case 5:  // memory_order_seq_cst
-      Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
+      Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
+                          Scope);
       break;
     }
     return RValue::get(nullptr);
@@ -1527,23 +1523,23 @@
     llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
 
     Builder.SetInsertPoint(AcquireBB);
-    Builder.CreateFence(llvm::Acquire, Scope);
+    Builder.CreateFence(llvm::AtomicOrdering::Acquire, Scope);
     Builder.CreateBr(ContBB);
     SI->addCase(Builder.getInt32(1), AcquireBB);
     SI->addCase(Builder.getInt32(2), AcquireBB);
 
     Builder.SetInsertPoint(ReleaseBB);
-    Builder.CreateFence(llvm::Release, Scope);
+    Builder.CreateFence(llvm::AtomicOrdering::Release, Scope);
     Builder.CreateBr(ContBB);
     SI->addCase(Builder.getInt32(3), ReleaseBB);
 
     Builder.SetInsertPoint(AcqRelBB);
-    Builder.CreateFence(llvm::AcquireRelease, Scope);
+    Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, Scope);
     Builder.CreateBr(ContBB);
     SI->addCase(Builder.getInt32(4), AcqRelBB);
 
     Builder.SetInsertPoint(SeqCstBB);
-    Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
+    Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, Scope);
     Builder.CreateBr(ContBB);
     SI->addCase(Builder.getInt32(5), SeqCstBB);
@@ -1874,9 +1870,10 @@
     llvm::Value *Comparand =
       Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
 
-    auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
-                                              SequentiallyConsistent,
-                                              SequentiallyConsistent);
+    auto Result =
+        Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
+                                    AtomicOrdering::SequentiallyConsistent,
+                                    AtomicOrdering::SequentiallyConsistent);
     Result->setVolatile(true);
 
     return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
@@ -1888,8 +1885,8 @@
       EmitScalarExpr(E->getArg(0)),
       EmitScalarExpr(E->getArg(2)),
      EmitScalarExpr(E->getArg(1)),
-      SequentiallyConsistent,
-      SequentiallyConsistent);
+      AtomicOrdering::SequentiallyConsistent,
+      AtomicOrdering::SequentiallyConsistent);
     CXI->setVolatile(true);
     return RValue::get(Builder.CreateExtractValue(CXI, 0));
   }
@@ -1898,7 +1895,7 @@
       AtomicRMWInst::Add,
       EmitScalarExpr(E->getArg(0)),
       ConstantInt::get(Int32Ty, 1),
-      llvm::SequentiallyConsistent);
+      llvm::AtomicOrdering::SequentiallyConsistent);
     RMWI->setVolatile(true);
     return RValue::get(Builder.CreateAdd(RMWI, ConstantInt::get(Int32Ty, 1)));
   }
@@ -1907,7 +1904,7 @@
       AtomicRMWInst::Sub,
       EmitScalarExpr(E->getArg(0)),
       ConstantInt::get(Int32Ty, 1),
-      llvm::SequentiallyConsistent);
+      llvm::AtomicOrdering::SequentiallyConsistent);
     RMWI->setVolatile(true);
     return RValue::get(Builder.CreateSub(RMWI, ConstantInt::get(Int32Ty, 1)));
   }
@@ -1916,7 +1913,7 @@
       AtomicRMWInst::Add,
       EmitScalarExpr(E->getArg(0)),
       EmitScalarExpr(E->getArg(1)),
-      llvm::SequentiallyConsistent);
+      llvm::AtomicOrdering::SequentiallyConsistent);
     RMWI->setVolatile(true);
     return RValue::get(RMWI);
   }
Index: cfe/trunk/lib/CodeGen/CGExprScalar.cpp
===================================================================
--- cfe/trunk/lib/CodeGen/CGExprScalar.cpp
+++ cfe/trunk/lib/CodeGen/CGExprScalar.cpp
@@ -1652,13 +1652,14 @@
       llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
       if (isPre) {
         Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
-          ->setAtomic(llvm::SequentiallyConsistent);
+            ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
         return Builder.getTrue();
       }
       // For atomic bool increment, we just store true and return it for
       // preincrement, do an atomic swap with true for postincrement
-      return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
-          LV.getPointer(), True, llvm::SequentiallyConsistent);
+      return Builder.CreateAtomicRMW(
+          llvm::AtomicRMWInst::Xchg, LV.getPointer(), True,
+          llvm::AtomicOrdering::SequentiallyConsistent);
     }
     // Special case for atomic increment / decrement on integers, emit
     // atomicrmw instructions.  We skip this if we want to be doing overflow
@@ -1675,7 +1676,7 @@
       llvm::Value *amt = CGF.EmitToMemory(
          llvm::ConstantInt::get(ConvertType(type), 1, true), type);
       llvm::Value *old = Builder.CreateAtomicRMW(aop,
-          LV.getPointer(), amt, llvm::SequentiallyConsistent);
+          LV.getPointer(), amt, llvm::AtomicOrdering::SequentiallyConsistent);
       return isPre ? Builder.CreateBinOp(op, old, amt) : old;
     }
     value = EmitLoadOfLValue(LV, E->getExprLoc());
@@ -2157,7 +2158,7 @@
                                                   E->getExprLoc()),
                              LHSTy);
       Builder.CreateAtomicRMW(aop, LHSLV.getPointer(), amt,
-          llvm::SequentiallyConsistent);
+          llvm::AtomicOrdering::SequentiallyConsistent);
       return LHSLV;
     }
   }
Index: cfe/trunk/lib/CodeGen/CGObjC.cpp
===================================================================
--- cfe/trunk/lib/CodeGen/CGObjC.cpp
+++ cfe/trunk/lib/CodeGen/CGObjC.cpp
@@ -906,7 +906,7 @@
     Address ivarAddr = LV.getAddress();
     ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
     llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
-    load->setAtomic(llvm::Unordered);
+    load->setAtomic(llvm::AtomicOrdering::Unordered);
 
     // Store that value into the return address.  Doing this with a
     // bitcast is likely to produce some pretty ugly IR, but it's not
@@ -1183,7 +1183,7 @@
 
   // Perform an atomic store.  There are no memory ordering requirements.
   llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr);
-  store->setAtomic(llvm::Unordered);
+  store->setAtomic(llvm::AtomicOrdering::Unordered);
 
   return;
 }
Index: cfe/trunk/lib/CodeGen/CGOpenMPRuntime.cpp
===================================================================
--- cfe/trunk/lib/CodeGen/CGOpenMPRuntime.cpp
+++ cfe/trunk/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -3960,7 +3960,8 @@
           if (EExpr)
             E = CGF.EmitAnyExpr(EExpr);
           CGF.EmitOMPAtomicSimpleUpdateExpr(
-              X, E, BO, /*IsXLHSInRHSPart=*/true, llvm::Monotonic, Loc,
+              X, E, BO, /*IsXLHSInRHSPart=*/true,
+              llvm::AtomicOrdering::Monotonic, Loc,
               [&CGF, UpExpr, VD, IPriv, Loc](RValue XRValue) {
                 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
                 PrivateScope.addPrivate(
Index: cfe/trunk/lib/CodeGen/CGStmtOpenMP.cpp
===================================================================
--- cfe/trunk/lib/CodeGen/CGStmtOpenMP.cpp
+++ cfe/trunk/lib/CodeGen/CGStmtOpenMP.cpp
@@ -2601,8 +2601,9 @@
   if (LVal.isGlobalReg()) {
     CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
   } else {
-    CGF.EmitAtomicStore(RVal, LVal, IsSeqCst ? llvm::SequentiallyConsistent
-                                             : llvm::Monotonic,
+    CGF.EmitAtomicStore(RVal, LVal,
+                        IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
+                                 : llvm::AtomicOrdering::Monotonic,
                         LVal.isVolatile(), /*IsInit=*/false);
   }
 }
@@ -2635,10 +2636,11 @@
   LValue VLValue = CGF.EmitLValue(V);
   RValue Res = XLValue.isGlobalReg()
                    ? CGF.EmitLoadOfLValue(XLValue, Loc)
-                   : CGF.EmitAtomicLoad(XLValue, Loc,
-                                        IsSeqCst ? llvm::SequentiallyConsistent
-                                                 : llvm::Monotonic,
-                                        XLValue.isVolatile());
+                   : CGF.EmitAtomicLoad(
+                         XLValue, Loc,
+                         IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
+                                  : llvm::AtomicOrdering::Monotonic,
+                         XLValue.isVolatile());
   // OpenMP, 2.12.6, atomic Construct
   // Any atomic construct with a seq_cst clause forces the atomically
   // performed operation to include an implicit flush operation without a
@@ -2794,7 +2796,8 @@
   assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
   LValue XLValue = CGF.EmitLValue(X);
   RValue ExprRValue = CGF.EmitAnyExpr(E);
-  auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
+  auto AO = IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
+                     : llvm::AtomicOrdering::Monotonic;
   auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
   auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
   auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
@@ -2843,7 +2846,8 @@
   LValue VLValue = CGF.EmitLValue(V);
   LValue XLValue = CGF.EmitLValue(X);
   RValue ExprRValue = CGF.EmitAnyExpr(E);
-  auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
+  auto AO = IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
+                     : llvm::AtomicOrdering::Monotonic;
   QualType NewVValType;
   if (UE) {
     // 'x' is updated with some additional value.
@@ -3206,4 +3210,3 @@
         cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
   });
 }
-
Index: cfe/trunk/lib/CodeGen/CodeGenFunction.h
===================================================================
--- cfe/trunk/lib/CodeGen/CodeGenFunction.h
+++ cfe/trunk/lib/CodeGen/CodeGenFunction.h
@@ -2483,8 +2483,10 @@
   std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
       LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
-      llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
-      llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
+      llvm::AtomicOrdering Success =
+          llvm::AtomicOrdering::SequentiallyConsistent,
+      llvm::AtomicOrdering Failure =
+          llvm::AtomicOrdering::SequentiallyConsistent,
       bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());
 
   void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
Index: cfe/trunk/lib/CodeGen/ItaniumCXXABI.cpp
===================================================================
--- cfe/trunk/lib/CodeGen/ItaniumCXXABI.cpp
+++ cfe/trunk/lib/CodeGen/ItaniumCXXABI.cpp
@@ -1999,7 +1999,7 @@
   //
   // In LLVM, we do this by marking the load Acquire.
   if (threadsafe)
-    LI->setAtomic(llvm::Acquire);
+    LI->setAtomic(llvm::AtomicOrdering::Acquire);
 
   // For ARM, we should only check the first bit, rather than the entire byte:
   //
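
Note on the pattern being applied (not part of the patch): every unscoped ordering constant such as llvm::SequentiallyConsistent or llvm::Acquire is respelled through the AtomicOrdering scope, e.g. llvm::AtomicOrdering::SequentiallyConsistent; only the qualification changes, not the emitted IR. A minimal sketch of the new spelling, with a hypothetical helper name and the IRBuilder call shape used at the time of this patch, mirroring the Xchg cases in CGBuiltin.cpp above:

// A minimal sketch, not part of the patch: the scoped spelling this diff
// migrates to. emitSeqCstExchange is a hypothetical helper name.
#include "llvm/IR/IRBuilder.h"

static llvm::Value *emitSeqCstExchange(llvm::IRBuilder<> &Builder,
                                       llvm::Value *Ptr, llvm::Value *NewVal) {
  // Before: Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
  //                                 llvm::SequentiallyConsistent);
  // After: the ordering is named through the AtomicOrdering scope.
  return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
                                 llvm::AtomicOrdering::SequentiallyConsistent);
}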