Index: llvm/trunk/docs/Atomics.rst
===================================================================
--- llvm/trunk/docs/Atomics.rst
+++ llvm/trunk/docs/Atomics.rst
@@ -367,7 +367,7 @@
   that they return true for any operation which is volatile or at least
   Monotonic.
 
-* ``isAtLeastAcquire()``/``isAtLeastRelease()``: These are predicates on
+* ``isStrongerThan``/``isAtLeastOrStrongerThan``: These are predicates on
   orderings. They can be useful for passes that are aware of atomics, for
   example to do DSE across a single atomic access, but not across a
   release-acquire pair (see MemoryDependencyAnalysis for an example of this)
Index: llvm/trunk/include/llvm/CodeGen/SelectionDAGNodes.h
===================================================================
--- llvm/trunk/include/llvm/CodeGen/SelectionDAGNodes.h
+++ llvm/trunk/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -1127,13 +1127,15 @@
                   AtomicOrdering FailureOrdering,
                   SynchronizationScope SynchScope) {
     // This must match encodeMemSDNodeFlags() in SelectionDAG.cpp.
-    assert((SuccessOrdering & 15) == SuccessOrdering &&
+    assert((AtomicOrdering)((unsigned)SuccessOrdering & 15) ==
+               SuccessOrdering &&
            "Ordering may not require more than 4 bits!");
-    assert((FailureOrdering & 15) == FailureOrdering &&
+    assert((AtomicOrdering)((unsigned)FailureOrdering & 15) ==
+               FailureOrdering &&
            "Ordering may not require more than 4 bits!");
     assert((SynchScope & 1) == SynchScope &&
            "SynchScope may not require more than 1 bit!");
-    SubclassData |= SuccessOrdering << 8;
+    SubclassData |= (unsigned)SuccessOrdering << 8;
     SubclassData |= SynchScope << 12;
     this->FailureOrdering = FailureOrdering;
     assert(getSuccessOrdering() == SuccessOrdering &&
Index: llvm/trunk/include/llvm/IR/Instructions.h
===================================================================
--- llvm/trunk/include/llvm/IR/Instructions.h
+++ llvm/trunk/include/llvm/IR/Instructions.h
@@ -36,10 +36,16 @@
 class DataLayout;
 class LLVMContext;
 
-enum AtomicOrdering {
+/// C++ defines ordering as a lattice. LLVM supplements this with NotAtomic and
+/// Unordered, which are both below the C++ orders. See docs/Atomics.rst for
+/// details.
+///
+/// not_atomic-->unordered-->relaxed-->release--------------->acq_rel-->seq_cst
+///                                 \-->consume-->acquire--/
+enum class AtomicOrdering {
   NotAtomic = 0,
   Unordered = 1,
-  Monotonic = 2,
+  Monotonic = 2, // Equivalent to C++'s relaxed.
   // Consume = 3,  // Not specified yet.
   Acquire = 4,
   Release = 5,
@@ -47,26 +53,68 @@
   SequentiallyConsistent = 7
 };
 
+/// String used by LLVM IR to represent atomic ordering.
+static inline const char *toIRString(AtomicOrdering ao) {
+  static const char *names[8] = {"not_atomic", "unordered", "monotonic",
+                                 "consume",    "acquire",   "release",
+                                 "acq_rel",    "seq_cst"};
+  return names[(size_t)ao];
+}
+
+/// Returns true if ao is stronger than other as defined by the AtomicOrdering
+/// lattice, which is based on C++'s definition.
+static inline bool isStrongerThan(AtomicOrdering ao, AtomicOrdering other) {
+  static const bool lookup[8][8] = {
+      //                 NA UN RX CO AC RE AR SC
+      /* NotAtomic */   {0, 0, 0, 0, 0, 0, 0, 0},
+      /* Unordered */   {1, 0, 0, 0, 0, 0, 0, 0},
+      /* relaxed   */   {1, 1, 0, 0, 0, 0, 0, 0},
+      /* consume   */   {1, 1, 1, 0, 0, 0, 0, 0},
+      /* acquire   */   {1, 1, 1, 1, 0, 0, 0, 0},
+      /* release   */   {1, 1, 1, 0, 0, 0, 0, 0},
+      /* acq_rel   */   {1, 1, 1, 1, 1, 1, 0, 0},
+      /* seq_cst   */   {1, 1, 1, 1, 1, 1, 1, 0},
+  };
+  return lookup[(size_t)ao][(size_t)other];
+}
+
+static inline bool isAtLeastOrStrongerThan(AtomicOrdering ao,
+                                           AtomicOrdering other) {
+  static const bool lookup[8][8] = {
+      //                 NA UN RX CO AC RE AR SC
+      /* NotAtomic */   {1, 0, 0, 0, 0, 0, 0, 0},
+      /* Unordered */   {1, 1, 0, 0, 0, 0, 0, 0},
+      /* relaxed   */   {1, 1, 1, 0, 0, 0, 0, 0},
+      /* consume   */   {1, 1, 1, 1, 0, 0, 0, 0},
+      /* acquire   */   {1, 1, 1, 1, 1, 0, 0, 0},
+      /* release   */   {1, 1, 1, 0, 0, 1, 0, 0},
+      /* acq_rel   */   {1, 1, 1, 1, 1, 1, 1, 0},
+      /* seq_cst   */   {1, 1, 1, 1, 1, 1, 1, 1},
+  };
+  return lookup[(size_t)ao][(size_t)other];
+}
+
+static inline bool isStrongerThanUnordered(AtomicOrdering Ord) {
+  return isStrongerThan(Ord, AtomicOrdering::Unordered);
+}
+
+static inline bool isStrongerThanMonotonic(AtomicOrdering Ord) {
+  return isStrongerThan(Ord, AtomicOrdering::Monotonic);
+}
+
+static inline bool isAcquireOrStronger(AtomicOrdering Ord) {
+  return isAtLeastOrStrongerThan(Ord, AtomicOrdering::Acquire);
+}
+
+static inline bool isReleaseOrStronger(AtomicOrdering Ord) {
+  return isAtLeastOrStrongerThan(Ord, AtomicOrdering::Release);
+}
+
 enum SynchronizationScope {
   SingleThread = 0,
   CrossThread = 1
 };
 
-/// Returns true if the ordering is at least as strong as acquire
-/// (i.e. acquire, acq_rel or seq_cst)
-inline bool isAtLeastAcquire(AtomicOrdering Ord) {
-  return (Ord == Acquire ||
-          Ord == AcquireRelease ||
-          Ord == SequentiallyConsistent);
-}
-
-/// Returns true if the ordering is at least as strong as release
-/// (i.e. release, acq_rel or seq_cst)
-inline bool isAtLeastRelease(AtomicOrdering Ord) {
-return (Ord == Release ||
-        Ord == AcquireRelease ||
-        Ord == SequentiallyConsistent);
-}
 
 //===----------------------------------------------------------------------===//
 //                                AllocaInst Class
@@ -269,7 +317,7 @@
   /// AcquireRelease.
   void setOrdering(AtomicOrdering Ordering) {
     setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
-                               (Ordering << 7));
+                               ((unsigned)Ordering << 7));
   }
 
   SynchronizationScope getSynchScope() const {
@@ -292,7 +340,9 @@
   bool isSimple() const { return !isAtomic() && !isVolatile(); }
 
   bool isUnordered() const {
-    return getOrdering() <= Unordered && !isVolatile();
+    return (getOrdering() == AtomicOrdering::NotAtomic ||
+            getOrdering() == AtomicOrdering::Unordered) &&
+           !isVolatile();
   }
 
   Value *getPointerOperand() { return getOperand(0); }
@@ -390,7 +440,7 @@
   /// AcquireRelease.
   void setOrdering(AtomicOrdering Ordering) {
     setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
-                               (Ordering << 7));
+                               ((unsigned)Ordering << 7));
   }
 
   SynchronizationScope getSynchScope() const {
@@ -413,7 +463,9 @@
   bool isSimple() const { return !isAtomic() && !isVolatile(); }
 
   bool isUnordered() const {
-    return getOrdering() <= Unordered && !isVolatile();
+    return (getOrdering() == AtomicOrdering::NotAtomic ||
+            getOrdering() == AtomicOrdering::Unordered) &&
+           !isVolatile();
   }
 
   Value *getValueOperand() { return getOperand(0); }
@@ -489,7 +541,7 @@
   /// AcquireRelease, or SequentiallyConsistent.
   void setOrdering(AtomicOrdering Ordering) {
     setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
-                               (Ordering << 1));
+                               ((unsigned)Ordering << 1));
   }
 
   SynchronizationScope getSynchScope() const {
@@ -584,17 +636,17 @@
   /// Set the ordering constraint on this cmpxchg.
   void setSuccessOrdering(AtomicOrdering Ordering) {
-    assert(Ordering != NotAtomic &&
+    assert(Ordering != AtomicOrdering::NotAtomic &&
            "CmpXchg instructions can only be atomic.");
     setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) |
-                               (Ordering << 2));
+                               ((unsigned)Ordering << 2));
   }
 
   void setFailureOrdering(AtomicOrdering Ordering) {
-    assert(Ordering != NotAtomic &&
+    assert(Ordering != AtomicOrdering::NotAtomic &&
            "CmpXchg instructions can only be atomic.");
     setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) |
-                               (Ordering << 5));
+                               ((unsigned)Ordering << 5));
   }
 
   /// Specify whether this cmpxchg is atomic and orders other operations with
@@ -646,15 +698,16 @@
   static AtomicOrdering
   getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
     switch (SuccessOrdering) {
-    default: llvm_unreachable("invalid cmpxchg success ordering");
-    case Release:
-    case Monotonic:
-      return Monotonic;
-    case AcquireRelease:
-    case Acquire:
-      return Acquire;
-    case SequentiallyConsistent:
-      return SequentiallyConsistent;
+    default:
+      llvm_unreachable("invalid cmpxchg success ordering");
+    case AtomicOrdering::Release:
+    case AtomicOrdering::Monotonic:
+      return AtomicOrdering::Monotonic;
+    case AtomicOrdering::AcquireRelease:
+    case AtomicOrdering::Acquire:
+      return AtomicOrdering::Acquire;
+    case AtomicOrdering::SequentiallyConsistent:
+      return AtomicOrdering::SequentiallyConsistent;
     }
   }
 
@@ -770,10 +823,10 @@
   /// Set the ordering constraint on this RMW.
   void setOrdering(AtomicOrdering Ordering) {
-    assert(Ordering != NotAtomic &&
+    assert(Ordering != AtomicOrdering::NotAtomic &&
            "atomicrmw instructions can only be atomic.");
     setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
-                               (Ordering << 2));
+                               ((unsigned)Ordering << 2));
   }
 
   /// Specify whether this RMW orders other operations with respect to all
Index: llvm/trunk/include/llvm/Target/TargetLowering.h
===================================================================
--- llvm/trunk/include/llvm/Target/TargetLowering.h
+++ llvm/trunk/include/llvm/Target/TargetLowering.h
@@ -1108,7 +1108,7 @@
   virtual Instruction *emitLeadingFence(IRBuilder<> &Builder,
                                         AtomicOrdering Ord, bool IsStore,
                                         bool IsLoad) const {
-    if (isAtLeastRelease(Ord) && IsStore)
+    if (isReleaseOrStronger(Ord) && IsStore)
       return Builder.CreateFence(Ord);
     else
       return nullptr;
@@ -1117,7 +1117,7 @@
   virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
                                          AtomicOrdering Ord, bool IsStore,
                                          bool IsLoad) const {
-    if (isAtLeastAcquire(Ord))
+    if (isAcquireOrStronger(Ord))
       return Builder.CreateFence(Ord);
     else
       return nullptr;
Index: llvm/trunk/lib/Analysis/AliasAnalysis.cpp
===================================================================
--- llvm/trunk/lib/Analysis/AliasAnalysis.cpp
+++ llvm/trunk/lib/Analysis/AliasAnalysis.cpp
@@ -389,7 +389,7 @@
 ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
                                     const MemoryLocation &Loc) {
   // Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
-  if (CX->getSuccessOrdering() > Monotonic)
+  if (isStrongerThanMonotonic(CX->getSuccessOrdering()))
     return MRI_ModRef;
 
   // If the cmpxchg address does not alias the location, it does not access it.
@@ -402,7 +402,7 @@
 ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
                                     const MemoryLocation &Loc) {
   // Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
-  if (RMW->getOrdering() > Monotonic)
+  if (isStrongerThanMonotonic(RMW->getOrdering()))
     return MRI_ModRef;
 
   // If the atomicrmw address does not alias the location, it does not access it.
Index: llvm/trunk/lib/Analysis/AliasSetTracker.cpp
===================================================================
--- llvm/trunk/lib/Analysis/AliasSetTracker.cpp
+++ llvm/trunk/lib/Analysis/AliasSetTracker.cpp
@@ -300,7 +300,7 @@
 
 bool AliasSetTracker::add(LoadInst *LI) {
-  if (LI->getOrdering() > Monotonic) return addUnknown(LI);
+  if (isStrongerThanMonotonic(LI->getOrdering())) return addUnknown(LI);
 
   AAMDNodes AAInfo;
   LI->getAAMetadata(AAInfo);
@@ -316,7 +316,7 @@
 }
 
 bool AliasSetTracker::add(StoreInst *SI) {
-  if (SI->getOrdering() > Monotonic) return addUnknown(SI);
+  if (isStrongerThanMonotonic(SI->getOrdering())) return addUnknown(SI);
 
   AAMDNodes AAInfo;
   SI->getAAMetadata(AAInfo);
Index: llvm/trunk/lib/Analysis/MemoryDependenceAnalysis.cpp
===================================================================
--- llvm/trunk/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ llvm/trunk/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -93,7 +93,7 @@
       Loc = MemoryLocation::get(LI);
       return MRI_Ref;
     }
-    if (LI->getOrdering() == Monotonic) {
+    if (LI->getOrdering() == AtomicOrdering::Monotonic) {
       Loc = MemoryLocation::get(LI);
       return MRI_ModRef;
     }
@@ -106,7 +106,7 @@
       Loc = MemoryLocation::get(SI);
       return MRI_Mod;
     }
-    if (SI->getOrdering() == Monotonic) {
+    if (SI->getOrdering() == AtomicOrdering::Monotonic) {
       Loc = MemoryLocation::get(SI);
       return MRI_ModRef;
     }
@@ -518,11 +518,11 @@
     // A Monotonic (or higher) load is OK if the query inst is itself not
     // atomic.
     // FIXME: This is overly conservative.
-    if (LI->isAtomic() && LI->getOrdering() > Unordered) {
+    if (LI->isAtomic() && isStrongerThanUnordered(LI->getOrdering())) {
       if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
           isOtherMemAccess(QueryInst))
         return MemDepResult::getClobber(LI);
-      if (LI->getOrdering() != Monotonic)
+      if (LI->getOrdering() != AtomicOrdering::Monotonic)
         return MemDepResult::getClobber(LI);
     }
@@ -588,7 +588,7 @@
       if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
           isOtherMemAccess(QueryInst))
         return MemDepResult::getClobber(SI);
-      if (SI->getOrdering() != Monotonic)
+      if (SI->getOrdering() != AtomicOrdering::Monotonic)
         return MemDepResult::getClobber(SI);
     }
@@ -644,9 +644,9 @@
     // loads. DSE uses this to find preceeding stores to delete and thus we
     // can't bypass the fence if the query instruction is a store.
     if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
-      if (isLoad && FI->getOrdering() == Release)
+      if (isLoad && FI->getOrdering() == AtomicOrdering::Release)
         continue;
-      
+
     // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
     ModRefInfo MR = AA.getModRefInfo(Inst, MemLoc);
     // If necessary, perform additional analysis.
@@ -1708,4 +1708,3 @@
   MemDep.emplace(AA, AC, TLI, DT);
   return false;
 }
-
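The analysis changes above all swap integer comparisons on the old plain enum
(`getOrdering() > Monotonic`, `> Unordered`) for the new lattice predicates. A
minimal standalone sketch of why the predicate form is the semantically honest
one; the `demo` function below is illustrative only, not code from this patch:

#include "llvm/IR/Instructions.h" // declares AtomicOrdering and its predicates
#include <cassert>
using namespace llvm;

void demo() {
  // Acquire and release are incomparable points of the lattice; a plain
  // integer comparison would have claimed release (5) is "stronger" than
  // acquire (4).
  assert(!isStrongerThan(AtomicOrdering::Acquire, AtomicOrdering::Release));
  assert(!isStrongerThan(AtomicOrdering::Release, AtomicOrdering::Acquire));

  // The "at least" variant is reflexive, the strict variant is not, which is
  // exactly the difference between the old >= and > comparisons.
  assert(isAtLeastOrStrongerThan(AtomicOrdering::Acquire,
                                 AtomicOrdering::Acquire));
  assert(!isStrongerThan(AtomicOrdering::Acquire, AtomicOrdering::Acquire));
}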
Index: llvm/trunk/lib/AsmParser/LLParser.cpp
===================================================================
--- llvm/trunk/lib/AsmParser/LLParser.cpp
+++ llvm/trunk/lib/AsmParser/LLParser.cpp
@@ -1810,12 +1810,16 @@
 bool LLParser::ParseOrdering(AtomicOrdering &Ordering) {
   switch (Lex.getKind()) {
   default: return TokError("Expected ordering on atomic instruction");
-  case lltok::kw_unordered: Ordering = Unordered; break;
-  case lltok::kw_monotonic: Ordering = Monotonic; break;
-  case lltok::kw_acquire: Ordering = Acquire; break;
-  case lltok::kw_release: Ordering = Release; break;
-  case lltok::kw_acq_rel: Ordering = AcquireRelease; break;
-  case lltok::kw_seq_cst: Ordering = SequentiallyConsistent; break;
+  case lltok::kw_unordered: Ordering = AtomicOrdering::Unordered; break;
+  case lltok::kw_monotonic: Ordering = AtomicOrdering::Monotonic; break;
+  // Not specified yet:
+  // case lltok::kw_consume: Ordering = AtomicOrdering::Consume; break;
+  case lltok::kw_acquire: Ordering = AtomicOrdering::Acquire; break;
+  case lltok::kw_release: Ordering = AtomicOrdering::Release; break;
+  case lltok::kw_acq_rel: Ordering = AtomicOrdering::AcquireRelease; break;
+  case lltok::kw_seq_cst:
+    Ordering = AtomicOrdering::SequentiallyConsistent;
+    break;
   }
   Lex.Lex();
   return false;
@@ -5884,7 +5888,7 @@
   unsigned Alignment = 0;
   bool AteExtraComma = false;
   bool isAtomic = false;
-  AtomicOrdering Ordering = NotAtomic;
+  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
   SynchronizationScope Scope = CrossThread;
 
   if (Lex.getKind() == lltok::kw_atomic) {
@@ -5911,7 +5915,8 @@
     return Error(Loc, "load operand must be a pointer to a first class type");
   if (isAtomic && !Alignment)
     return Error(Loc, "atomic load must have explicit non-zero alignment");
-  if (Ordering == Release || Ordering == AcquireRelease)
+  if (Ordering == AtomicOrdering::Release ||
+      Ordering == AtomicOrdering::AcquireRelease)
     return Error(Loc, "atomic load cannot use Release ordering");
 
   if (Ty != cast<PointerType>(Val->getType())->getElementType())
@@ -5932,7 +5937,7 @@
   unsigned Alignment = 0;
   bool AteExtraComma = false;
   bool isAtomic = false;
-  AtomicOrdering Ordering = NotAtomic;
+  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
   SynchronizationScope Scope = CrossThread;
 
   if (Lex.getKind() == lltok::kw_atomic) {
@@ -5961,7 +5966,8 @@
     return Error(Loc, "stored value and pointer type do not match");
   if (isAtomic && !Alignment)
     return Error(Loc, "atomic store must have explicit non-zero alignment");
-  if (Ordering == Acquire || Ordering == AcquireRelease)
+  if (Ordering == AtomicOrdering::Acquire ||
+      Ordering == AtomicOrdering::AcquireRelease)
     return Error(Loc, "atomic store cannot use Acquire ordering");
 
   Inst = new StoreInst(Val, Ptr, isVolatile, Alignment, Ordering, Scope);
@@ -5974,8 +5980,8 @@
 int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
   Value *Ptr, *Cmp, *New; LocTy PtrLoc, CmpLoc, NewLoc;
   bool AteExtraComma = false;
-  AtomicOrdering SuccessOrdering = NotAtomic;
-  AtomicOrdering FailureOrdering = NotAtomic;
+  AtomicOrdering SuccessOrdering = AtomicOrdering::NotAtomic;
+  AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic;
   SynchronizationScope Scope = CrossThread;
   bool isVolatile = false;
   bool isWeak = false;
@@ -5995,12 +6001,16 @@
       ParseOrdering(FailureOrdering))
     return true;
 
-  if (SuccessOrdering == Unordered || FailureOrdering == Unordered)
+  if (SuccessOrdering == AtomicOrdering::Unordered ||
+      FailureOrdering == AtomicOrdering::Unordered)
     return TokError("cmpxchg cannot be unordered");
-  if (SuccessOrdering < FailureOrdering)
-    return TokError("cmpxchg must be at least as ordered on success as failure");
-  if (FailureOrdering == Release || FailureOrdering == AcquireRelease)
-    return TokError("cmpxchg failure ordering cannot include release semantics");
+  if (isStrongerThan(FailureOrdering, SuccessOrdering))
+    return TokError("cmpxchg failure argument shall be no stronger than the "
+                    "success argument");
+  if (FailureOrdering == AtomicOrdering::Release ||
+      FailureOrdering == AtomicOrdering::AcquireRelease)
+    return TokError(
+        "cmpxchg failure ordering cannot include release semantics");
   if (!Ptr->getType()->isPointerTy())
     return Error(PtrLoc, "cmpxchg operand must be a pointer");
   if (cast<PointerType>(Ptr->getType())->getElementType() != Cmp->getType())
@@ -6023,7 +6033,7 @@
 int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
   Value *Ptr, *Val; LocTy PtrLoc, ValLoc;
   bool AteExtraComma = false;
-  AtomicOrdering Ordering = NotAtomic;
+  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
   SynchronizationScope Scope = CrossThread;
   bool isVolatile = false;
   AtomicRMWInst::BinOp Operation;
@@ -6053,7 +6063,7 @@
       ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering))
     return true;
 
-  if (Ordering == Unordered)
+  if (Ordering == AtomicOrdering::Unordered)
     return TokError("atomicrmw cannot be unordered");
   if (!Ptr->getType()->isPointerTy())
     return Error(PtrLoc, "atomicrmw operand must be a pointer");
@@ -6076,14 +6086,14 @@
 /// ParseFence
 ///   ::= 'fence' 'singlethread'? AtomicOrdering
 int LLParser::ParseFence(Instruction *&Inst, PerFunctionState &PFS) {
-  AtomicOrdering Ordering = NotAtomic;
+  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
   SynchronizationScope Scope = CrossThread;
   if (ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering))
     return true;
 
-  if (Ordering == Unordered)
+  if (Ordering == AtomicOrdering::Unordered)
     return TokError("fence cannot be unordered");
-  if (Ordering == Monotonic)
+  if (Ordering == AtomicOrdering::Monotonic)
     return TokError("fence cannot be monotonic");
 
   Inst = new FenceInst(Context, Ordering, Scope);
Index: llvm/trunk/lib/Bitcode/Reader/BitcodeReader.cpp
===================================================================
--- llvm/trunk/lib/Bitcode/Reader/BitcodeReader.cpp
+++ llvm/trunk/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -808,14 +808,14 @@
 
 static AtomicOrdering getDecodedOrdering(unsigned Val) {
   switch (Val) {
-  case bitc::ORDERING_NOTATOMIC: return NotAtomic;
-  case bitc::ORDERING_UNORDERED: return Unordered;
-  case bitc::ORDERING_MONOTONIC: return Monotonic;
-  case bitc::ORDERING_ACQUIRE: return Acquire;
-  case bitc::ORDERING_RELEASE: return Release;
-  case bitc::ORDERING_ACQREL: return AcquireRelease;
+  case bitc::ORDERING_NOTATOMIC: return AtomicOrdering::NotAtomic;
+  case bitc::ORDERING_UNORDERED: return AtomicOrdering::Unordered;
+  case bitc::ORDERING_MONOTONIC: return AtomicOrdering::Monotonic;
+  case bitc::ORDERING_ACQUIRE: return AtomicOrdering::Acquire;
+  case bitc::ORDERING_RELEASE: return AtomicOrdering::Release;
+  case bitc::ORDERING_ACQREL: return AtomicOrdering::AcquireRelease;
   default: // Map unknown orderings to sequentially-consistent.
-  case bitc::ORDERING_SEQCST: return SequentiallyConsistent;
+  case bitc::ORDERING_SEQCST: return AtomicOrdering::SequentiallyConsistent;
   }
 }
@@ -4936,10 +4936,11 @@
         Ty = cast<PointerType>(Op->getType())->getElementType();
 
       AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
-      if (Ordering == NotAtomic || Ordering == Release ||
-          Ordering == AcquireRelease)
+      if (Ordering == AtomicOrdering::NotAtomic ||
+          Ordering == AtomicOrdering::Release ||
+          Ordering == AtomicOrdering::AcquireRelease)
         return error("Invalid record");
-      if (Ordering != NotAtomic && Record[OpNum] == 0)
+      if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0)
         return error("Invalid record");
       SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
@@ -4992,11 +4993,12 @@
           typeCheckLoadStoreInst(Val->getType(), Ptr->getType()))
         return EC;
       AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
-      if (Ordering == NotAtomic || Ordering == Acquire ||
-          Ordering == AcquireRelease)
+      if (Ordering == AtomicOrdering::NotAtomic ||
+          Ordering == AtomicOrdering::Acquire ||
+          Ordering == AtomicOrdering::AcquireRelease)
         return error("Invalid record");
       SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
-      if (Ordering != NotAtomic && Record[OpNum] == 0)
+      if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0)
         return error("Invalid record");
 
       unsigned Align;
@@ -5022,7 +5024,8 @@
           Record.size() < OpNum + 3 || Record.size() > OpNum + 5)
         return error("Invalid record");
       AtomicOrdering SuccessOrdering = getDecodedOrdering(Record[OpNum + 1]);
-      if (SuccessOrdering == NotAtomic || SuccessOrdering == Unordered)
+      if (SuccessOrdering == AtomicOrdering::NotAtomic ||
+          SuccessOrdering == AtomicOrdering::Unordered)
         return error("Invalid record");
       SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 2]);
@@ -5067,7 +5070,8 @@
           Operation > AtomicRMWInst::LAST_BINOP)
         return error("Invalid record");
       AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
-      if (Ordering == NotAtomic || Ordering == Unordered)
+      if (Ordering == AtomicOrdering::NotAtomic ||
+          Ordering == AtomicOrdering::Unordered)
         return error("Invalid record");
       SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
       I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SynchScope);
@@ -5079,8 +5083,9 @@
       if (2 != Record.size())
         return error("Invalid record");
       AtomicOrdering Ordering = getDecodedOrdering(Record[0]);
-      if (Ordering == NotAtomic || Ordering == Unordered ||
-          Ordering == Monotonic)
+      if (Ordering == AtomicOrdering::NotAtomic ||
+          Ordering == AtomicOrdering::Unordered ||
+          Ordering == AtomicOrdering::Monotonic)
         return error("Invalid record");
       SynchronizationScope SynchScope = getDecodedSynchScope(Record[1]);
       I = new FenceInst(Context, Ordering, SynchScope);
Index: llvm/trunk/lib/Bitcode/Writer/BitcodeWriter.cpp
===================================================================
--- llvm/trunk/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ llvm/trunk/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -133,13 +133,13 @@
 
 static unsigned GetEncodedOrdering(AtomicOrdering Ordering) {
   switch (Ordering) {
-  case NotAtomic: return bitc::ORDERING_NOTATOMIC;
-  case Unordered: return bitc::ORDERING_UNORDERED;
-  case Monotonic: return bitc::ORDERING_MONOTONIC;
-  case Acquire: return bitc::ORDERING_ACQUIRE;
-  case Release: return bitc::ORDERING_RELEASE;
-  case AcquireRelease: return bitc::ORDERING_ACQREL;
-  case SequentiallyConsistent: return bitc::ORDERING_SEQCST;
+  case AtomicOrdering::NotAtomic: return bitc::ORDERING_NOTATOMIC;
+  case AtomicOrdering::Unordered: return bitc::ORDERING_UNORDERED;
+  case AtomicOrdering::Monotonic: return bitc::ORDERING_MONOTONIC;
+  case AtomicOrdering::Acquire: return bitc::ORDERING_ACQUIRE;
+  case AtomicOrdering::Release: return bitc::ORDERING_RELEASE;
+  case AtomicOrdering::AcquireRelease: return bitc::ORDERING_ACQREL;
+  case AtomicOrdering::SequentiallyConsistent: return bitc::ORDERING_SEQCST;
   }
   llvm_unreachable("Invalid ordering");
 }
Index: llvm/trunk/lib/CodeGen/AtomicExpandPass.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/AtomicExpandPass.cpp
+++ llvm/trunk/lib/CodeGen/AtomicExpandPass.cpp
@@ -101,37 +101,37 @@
   assert((LI || SI || RMWI || CASI) && "Unknown atomic instruction");
 
   if (TLI->shouldInsertFencesForAtomic(I)) {
-    auto FenceOrdering = Monotonic;
+    auto FenceOrdering = AtomicOrdering::Monotonic;
     bool IsStore, IsLoad;
-    if (LI && isAtLeastAcquire(LI->getOrdering())) {
+    if (LI && isAcquireOrStronger(LI->getOrdering())) {
       FenceOrdering = LI->getOrdering();
-      LI->setOrdering(Monotonic);
+      LI->setOrdering(AtomicOrdering::Monotonic);
       IsStore = false;
       IsLoad = true;
-    } else if (SI && isAtLeastRelease(SI->getOrdering())) {
+    } else if (SI && isReleaseOrStronger(SI->getOrdering())) {
       FenceOrdering = SI->getOrdering();
-      SI->setOrdering(Monotonic);
+      SI->setOrdering(AtomicOrdering::Monotonic);
       IsStore = true;
       IsLoad = false;
-    } else if (RMWI && (isAtLeastRelease(RMWI->getOrdering()) ||
-                        isAtLeastAcquire(RMWI->getOrdering()))) {
+    } else if (RMWI && (isReleaseOrStronger(RMWI->getOrdering()) ||
+                        isAcquireOrStronger(RMWI->getOrdering()))) {
       FenceOrdering = RMWI->getOrdering();
-      RMWI->setOrdering(Monotonic);
+      RMWI->setOrdering(AtomicOrdering::Monotonic);
       IsStore = IsLoad = true;
     } else if (CASI && !TLI->shouldExpandAtomicCmpXchgInIR(CASI) &&
-               (isAtLeastRelease(CASI->getSuccessOrdering()) ||
-                isAtLeastAcquire(CASI->getSuccessOrdering()))) {
+               (isReleaseOrStronger(CASI->getSuccessOrdering()) ||
+                isAcquireOrStronger(CASI->getSuccessOrdering()))) {
       // If a compare and swap is lowered to LL/SC, we can do smarter fence
       // insertion, with a stronger one on the success path than on the
       // failure path. As a result, fence insertion is directly done by
       // expandAtomicCmpXchg in that case.
       FenceOrdering = CASI->getSuccessOrdering();
-      CASI->setSuccessOrdering(Monotonic);
-      CASI->setFailureOrdering(Monotonic);
+      CASI->setSuccessOrdering(AtomicOrdering::Monotonic);
+      CASI->setFailureOrdering(AtomicOrdering::Monotonic);
       IsStore = IsLoad = true;
     }
 
-    if (FenceOrdering != Monotonic) {
+    if (FenceOrdering != AtomicOrdering::Monotonic) {
       MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad);
     }
   }
@@ -520,7 +520,7 @@
   // should preserve the ordering.
   bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI);
   AtomicOrdering MemOpOrder =
-      ShouldInsertFencesForAtomic ? Monotonic : SuccessOrder;
+      ShouldInsertFencesForAtomic ? AtomicOrdering::Monotonic : SuccessOrder;
 
   // In implementations which use a barrier to achieve release semantics, we can
   // delay emitting this barrier until we know a store is actually going to be
@@ -532,8 +532,9 @@
   // minimal loop. Unfortunately, this puts too much stress on later
   // optimisations so we avoid emitting the extra logic in those cases too.
   bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic &&
-                           SuccessOrder != Monotonic &&
-                           SuccessOrder != Acquire && !F->optForMinSize();
+                           SuccessOrder != AtomicOrdering::Monotonic &&
+                           SuccessOrder != AtomicOrdering::Acquire &&
+                           !F->optForMinSize();
 
   // There's no overhead for sinking the release barrier in a weak cmpxchg, so
   // do it even on minsize.
@@ -767,8 +768,9 @@
                                        CreateCmpXchgInstFun CreateCmpXchg) {
   assert(AI);
 
-  AtomicOrdering MemOpOrder =
-      AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering();
+  AtomicOrdering MemOpOrder = AI->getOrdering() == AtomicOrdering::Unordered
+                                  ? AtomicOrdering::Monotonic
+                                  : AI->getOrdering();
   Value *Addr = AI->getPointerOperand();
   BasicBlock *BB = AI->getParent();
   Function *F = BB->getParent();
Index: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3903,7 +3903,7 @@
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   SDValue Ops[3];
   Ops[0] = getRoot();
-  Ops[1] = DAG.getConstant(I.getOrdering(), dl,
+  Ops[1] = DAG.getConstant((unsigned)I.getOrdering(), dl,
                            TLI.getPointerTy(DAG.getDataLayout()));
   Ops[2] = DAG.getConstant(I.getSynchScope(), dl,
                            TLI.getPointerTy(DAG.getDataLayout()));
Index: llvm/trunk/lib/IR/AsmWriter.cpp
===================================================================
--- llvm/trunk/lib/IR/AsmWriter.cpp
+++ llvm/trunk/lib/IR/AsmWriter.cpp
@@ -2110,7 +2110,7 @@
 
 void AssemblyWriter::writeAtomic(AtomicOrdering Ordering,
                                  SynchronizationScope SynchScope) {
-  if (Ordering == NotAtomic)
+  if (Ordering == AtomicOrdering::NotAtomic)
     return;
 
   switch (SynchScope) {
@@ -2118,46 +2118,22 @@
   case CrossThread: break;
   }
 
-  switch (Ordering) {
-  default: Out << " "; break;
-  case Unordered: Out << " unordered"; break;
-  case Monotonic: Out << " monotonic"; break;
-  case Acquire: Out << " acquire"; break;
-  case Release: Out << " release"; break;
-  case AcquireRelease: Out << " acq_rel"; break;
-  case SequentiallyConsistent: Out << " seq_cst"; break;
-  }
+  Out << " " << toIRString(Ordering);
 }
 
 void AssemblyWriter::writeAtomicCmpXchg(AtomicOrdering SuccessOrdering,
                                         AtomicOrdering FailureOrdering,
                                         SynchronizationScope SynchScope) {
-  assert(SuccessOrdering != NotAtomic && FailureOrdering != NotAtomic);
+  assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
+         FailureOrdering != AtomicOrdering::NotAtomic);
 
   switch (SynchScope) {
   case SingleThread: Out << " singlethread"; break;
   case CrossThread: break;
   }
 
-  switch (SuccessOrdering) {
-  default: Out << " "; break;
-  case Unordered: Out << " unordered"; break;
-  case Monotonic: Out << " monotonic"; break;
-  case Acquire: Out << " acquire"; break;
-  case Release: Out << " release"; break;
-  case AcquireRelease: Out << " acq_rel"; break;
-  case SequentiallyConsistent: Out << " seq_cst"; break;
-  }
-
-  switch (FailureOrdering) {
-  default: Out << " "; break;
-  case Unordered: Out << " unordered"; break;
-  case Monotonic: Out << " monotonic"; break;
-  case Acquire: Out << " acquire"; break;
-  case Release: Out << " release"; break;
-  case AcquireRelease: Out << " acq_rel"; break;
-  case SequentiallyConsistent: Out << " seq_cst"; break;
-  }
+  Out << " " << toIRString(SuccessOrdering);
+  Out << " " << toIRString(FailureOrdering);
 }
 
 void AssemblyWriter::writeParamOperand(const Value *Operand,
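With the printer driven by `toIRString`, the duplicated switches above collapse
to a table lookup. A small illustrative check of the mapping; this is assumed
test-style code for the review, not part of the patch:

#include "llvm/IR/Instructions.h"
#include <cassert>
#include <cstring>
using namespace llvm;

void printNames() {
  // The names table is indexed by the enumerator's integral value, so the
  // printed keyword tracks the enum directly.
  assert(!strcmp(toIRString(AtomicOrdering::Monotonic), "monotonic"));
  assert(!strcmp(toIRString(AtomicOrdering::AcquireRelease), "acq_rel"));
  assert(!strcmp(toIRString(AtomicOrdering::SequentiallyConsistent),
                 "seq_cst"));
}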
Index: llvm/trunk/lib/IR/Core.cpp
===================================================================
--- llvm/trunk/lib/IR/Core.cpp
+++ llvm/trunk/lib/IR/Core.cpp
@@ -2602,14 +2602,15 @@
 
 static AtomicOrdering mapFromLLVMOrdering(LLVMAtomicOrdering Ordering) {
   switch (Ordering) {
-    case LLVMAtomicOrderingNotAtomic: return NotAtomic;
-    case LLVMAtomicOrderingUnordered: return Unordered;
-    case LLVMAtomicOrderingMonotonic: return Monotonic;
-    case LLVMAtomicOrderingAcquire: return Acquire;
-    case LLVMAtomicOrderingRelease: return Release;
-    case LLVMAtomicOrderingAcquireRelease: return AcquireRelease;
+    case LLVMAtomicOrderingNotAtomic: return AtomicOrdering::NotAtomic;
+    case LLVMAtomicOrderingUnordered: return AtomicOrdering::Unordered;
+    case LLVMAtomicOrderingMonotonic: return AtomicOrdering::Monotonic;
+    case LLVMAtomicOrderingAcquire: return AtomicOrdering::Acquire;
+    case LLVMAtomicOrderingRelease: return AtomicOrdering::Release;
+    case LLVMAtomicOrderingAcquireRelease:
+      return AtomicOrdering::AcquireRelease;
     case LLVMAtomicOrderingSequentiallyConsistent:
-      return SequentiallyConsistent;
+      return AtomicOrdering::SequentiallyConsistent;
   }
 
   llvm_unreachable("Invalid LLVMAtomicOrdering value!");
@@ -2617,13 +2618,14 @@
 
 static LLVMAtomicOrdering mapToLLVMOrdering(AtomicOrdering Ordering) {
   switch (Ordering) {
-    case NotAtomic: return LLVMAtomicOrderingNotAtomic;
-    case Unordered: return LLVMAtomicOrderingUnordered;
-    case Monotonic: return LLVMAtomicOrderingMonotonic;
-    case Acquire: return LLVMAtomicOrderingAcquire;
-    case Release: return LLVMAtomicOrderingRelease;
-    case AcquireRelease: return LLVMAtomicOrderingAcquireRelease;
-    case SequentiallyConsistent:
+    case AtomicOrdering::NotAtomic: return LLVMAtomicOrderingNotAtomic;
+    case AtomicOrdering::Unordered: return LLVMAtomicOrderingUnordered;
+    case AtomicOrdering::Monotonic: return LLVMAtomicOrderingMonotonic;
+    case AtomicOrdering::Acquire: return LLVMAtomicOrderingAcquire;
+    case AtomicOrdering::Release: return LLVMAtomicOrderingRelease;
+    case AtomicOrdering::AcquireRelease:
+      return LLVMAtomicOrderingAcquireRelease;
+    case AtomicOrdering::SequentiallyConsistent:
       return LLVMAtomicOrderingSequentiallyConsistent;
   }
 
Index: llvm/trunk/lib/IR/Instruction.cpp
===================================================================
--- llvm/trunk/lib/IR/Instruction.cpp
+++ llvm/trunk/lib/IR/Instruction.cpp
@@ -461,9 +461,9 @@
   case Instruction::Fence:
     return true;
   case Instruction::Load:
-    return cast<LoadInst>(this)->getOrdering() != NotAtomic;
+    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
   case Instruction::Store:
-    return cast<StoreInst>(this)->getOrdering() != NotAtomic;
+    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
   }
 }
Index: llvm/trunk/lib/IR/Instructions.cpp
===================================================================
--- llvm/trunk/lib/IR/Instructions.cpp
+++ llvm/trunk/lib/IR/Instructions.cpp
@@ -1209,13 +1209,13 @@
 
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                    unsigned Align, Instruction *InsertBef)
-    : LoadInst(Ty, Ptr, Name, isVolatile, Align, NotAtomic, CrossThread,
-               InsertBef) {}
+    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
+               CrossThread, InsertBef) {}
 
 LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                    unsigned Align, BasicBlock *InsertAE)
-    : LoadInst(Ptr, Name, isVolatile, Align, NotAtomic, CrossThread, InsertAE) {
-}
+    : LoadInst(Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
+               CrossThread, InsertAE) {}
 
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                    unsigned Align, AtomicOrdering Order,
@@ -1247,7 +1247,7 @@
                      Load, Ptr, InsertBef) {
   setVolatile(false);
   setAlignment(0);
-  setAtomic(NotAtomic);
+  setAtomic(AtomicOrdering::NotAtomic);
   AssertOK();
   if (Name && Name[0]) setName(Name);
 }
@@ -1257,7 +1257,7 @@
                      Load, Ptr, InsertAE) {
   setVolatile(false);
   setAlignment(0);
-  setAtomic(NotAtomic);
+  setAtomic(AtomicOrdering::NotAtomic);
   AssertOK();
   if (Name && Name[0]) setName(Name);
 }
@@ -1268,7 +1268,7 @@
   assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
   setVolatile(isVolatile);
   setAlignment(0);
-  setAtomic(NotAtomic);
+  setAtomic(AtomicOrdering::NotAtomic);
   AssertOK();
   if (Name && Name[0]) setName(Name);
 }
@@ -1279,7 +1279,7 @@
                      Load, Ptr, InsertAE) {
   setVolatile(isVolatile);
   setAlignment(0);
-  setAtomic(NotAtomic);
+  setAtomic(AtomicOrdering::NotAtomic);
   AssertOK();
   if (Name && Name[0]) setName(Name);
 }
@@ -1324,13 +1324,13 @@
 
 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
                      Instruction *InsertBefore)
-    : StoreInst(val, addr, isVolatile, Align, NotAtomic, CrossThread,
-                InsertBefore) {}
+    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
+                CrossThread, InsertBefore) {}
 
 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
                      BasicBlock *InsertAtEnd)
-    : StoreInst(val, addr, isVolatile, Align, NotAtomic, CrossThread,
-                InsertAtEnd) {}
+    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
+                CrossThread, InsertAtEnd) {}
 
 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
                      AtomicOrdering Order,
@@ -1398,13 +1398,15 @@
   assert(getOperand(2)->getType() ==
          cast<PointerType>(getOperand(0)->getType())->getElementType()
          && "Ptr must be a pointer to NewVal type!");
-  assert(SuccessOrdering != NotAtomic &&
+  assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
          "AtomicCmpXchg instructions must be atomic!");
-  assert(FailureOrdering != NotAtomic &&
+  assert(FailureOrdering != AtomicOrdering::NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
-  assert(SuccessOrdering >= FailureOrdering &&
-         "AtomicCmpXchg success ordering must be at least as strong as fail");
-  assert(FailureOrdering != Release && FailureOrdering != AcquireRelease &&
+  assert(!isStrongerThan(FailureOrdering, SuccessOrdering) &&
+         "AtomicCmpXchg failure argument shall be no stronger than the success "
+         "argument");
+  assert(FailureOrdering != AtomicOrdering::Release &&
+         FailureOrdering != AtomicOrdering::AcquireRelease &&
          "AtomicCmpXchg failure ordering cannot include release semantics");
 }
@@ -1454,7 +1456,7 @@
   assert(getOperand(1)->getType() ==
          cast<PointerType>(getOperand(0)->getType())->getElementType()
          && "Ptr must be a pointer to Val type!");
-  assert(Ordering != NotAtomic &&
+  assert(Ordering != AtomicOrdering::NotAtomic &&
          "AtomicRMW instructions must be atomic!");
 }
Index: llvm/trunk/lib/IR/Verifier.cpp
===================================================================
--- llvm/trunk/lib/IR/Verifier.cpp
+++ llvm/trunk/lib/IR/Verifier.cpp
@@ -2919,7 +2919,8 @@
   Assert(LI.getAlignment() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &LI);
   if (LI.isAtomic()) {
-    Assert(LI.getOrdering() != Release && LI.getOrdering() != AcquireRelease,
+    Assert(LI.getOrdering() != AtomicOrdering::Release &&
+               LI.getOrdering() != AtomicOrdering::AcquireRelease,
            "Load cannot have Release ordering", &LI);
     Assert(LI.getAlignment() != 0,
            "Atomic load must specify explicit alignment", &LI);
@@ -2946,7 +2947,8 @@
   Assert(SI.getAlignment() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &SI);
   if (SI.isAtomic()) {
-    Assert(SI.getOrdering() != Acquire && SI.getOrdering() != AcquireRelease,
+    Assert(SI.getOrdering() != AtomicOrdering::Acquire &&
+               SI.getOrdering() != AtomicOrdering::AcquireRelease,
            "Store cannot have Acquire ordering", &SI);
     Assert(SI.getAlignment() != 0,
            "Atomic store must specify explicit alignment", &SI);
@@ -3022,19 +3024,20 @@
 void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
 
   // FIXME: more conditions???
-  Assert(CXI.getSuccessOrdering() != NotAtomic,
+  Assert(CXI.getSuccessOrdering() != AtomicOrdering::NotAtomic,
          "cmpxchg instructions must be atomic.", &CXI);
-  Assert(CXI.getFailureOrdering() != NotAtomic,
+  Assert(CXI.getFailureOrdering() != AtomicOrdering::NotAtomic,
          "cmpxchg instructions must be atomic.", &CXI);
-  Assert(CXI.getSuccessOrdering() != Unordered,
+  Assert(CXI.getSuccessOrdering() != AtomicOrdering::Unordered,
          "cmpxchg instructions cannot be unordered.", &CXI);
-  Assert(CXI.getFailureOrdering() != Unordered,
+  Assert(CXI.getFailureOrdering() != AtomicOrdering::Unordered,
          "cmpxchg instructions cannot be unordered.", &CXI);
-  Assert(CXI.getSuccessOrdering() >= CXI.getFailureOrdering(),
-         "cmpxchg instructions be at least as constrained on success as fail",
+  Assert(!isStrongerThan(CXI.getFailureOrdering(), CXI.getSuccessOrdering()),
+         "cmpxchg instructions failure argument shall be no stronger than the "
+         "success argument",
          &CXI);
-  Assert(CXI.getFailureOrdering() != Release &&
-             CXI.getFailureOrdering() != AcquireRelease,
+  Assert(CXI.getFailureOrdering() != AtomicOrdering::Release &&
+             CXI.getFailureOrdering() != AtomicOrdering::AcquireRelease,
          "cmpxchg failure ordering cannot include release semantics", &CXI);
 
   PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
@@ -3053,9 +3056,9 @@
 }
 
 void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
-  Assert(RMWI.getOrdering() != NotAtomic,
+  Assert(RMWI.getOrdering() != AtomicOrdering::NotAtomic,
          "atomicrmw instructions must be atomic.", &RMWI);
-  Assert(RMWI.getOrdering() != Unordered,
+  Assert(RMWI.getOrdering() != AtomicOrdering::Unordered,
          "atomicrmw instructions cannot be unordered.", &RMWI);
   PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
   Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI);
@@ -3074,10 +3077,12 @@
 
 void Verifier::visitFenceInst(FenceInst &FI) {
   const AtomicOrdering Ordering = FI.getOrdering();
-  Assert(Ordering == Acquire || Ordering == Release ||
-             Ordering == AcquireRelease || Ordering == SequentiallyConsistent,
-         "fence instructions may only have "
-         "acquire, release, acq_rel, or seq_cst ordering.",
+  Assert(Ordering == AtomicOrdering::Acquire ||
+             Ordering == AtomicOrdering::Release ||
+             Ordering == AtomicOrdering::AcquireRelease ||
+             Ordering == AtomicOrdering::SequentiallyConsistent,
+         "fence instructions may only have acquire, release, acq_rel, or "
+         "seq_cst ordering.",
          &FI);
   visitInstruction(FI);
 }
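The verifier rule above no longer uses `>=` on raw enum values; it rejects only
a failure ordering that is strictly stronger than the success ordering in the
lattice. A hedged sketch of the combined cmpxchg check; `cmpXchgOrderingsOK` is
a hypothetical helper written for this review, not the verifier's actual
interface:

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical helper mirroring the two verifier asserts above.
bool cmpXchgOrderingsOK(AtomicOrdering Success, AtomicOrdering Failure) {
  // Failure may be weaker than, equal to, or incomparable with Success,
  // but never strictly stronger, and may not carry release semantics.
  return !isStrongerThan(Failure, Success) &&
         Failure != AtomicOrdering::Release &&
         Failure != AtomicOrdering::AcquireRelease;
}
// e.g. (SequentiallyConsistent, Monotonic) -> true
//      (Monotonic, SequentiallyConsistent) -> false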
Index: llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -608,7 +608,7 @@
     // ldar and stlr have much more restrictive addressing modes (just a
     // register).
-    if (cast<MemSDNode>(Use)->getOrdering() > Monotonic)
+    if (isStrongerThanMonotonic(cast<MemSDNode>(Use)->getOrdering()))
       return false;
   }
 
Index: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -10132,7 +10132,7 @@
                                              AtomicOrdering Ord) const {
   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
   Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
-  bool IsAcquire = isAtLeastAcquire(Ord);
+  bool IsAcquire = isAcquireOrStronger(Ord);
 
   // Since i128 isn't legal and intrinsics don't get type-lowered, the ldrexd
   // intrinsic must return {i64, i64} and we have to recombine them into a
@@ -10174,7 +10174,7 @@
                                                    Value *Val, Value *Addr,
                                                    AtomicOrdering Ord) const {
   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
-  bool IsRelease = isAtLeastRelease(Ord);
+  bool IsRelease = isReleaseOrStronger(Ord);
 
   // Since the intrinsics must have legal type, the i128 intrinsics take two
   // parameters: "i64, i64". We must marshal Val into the appropriate form
Index: llvm/trunk/lib/Target/AArch64/AArch64InstrAtomics.td
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstrAtomics.td
+++ llvm/trunk/lib/Target/AArch64/AArch64InstrAtomics.td
@@ -29,7 +29,7 @@
 class acquiring_load<PatFrag base>
   : PatFrag<(ops node:$ptr), (base node:$ptr), [{
   AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
-  return isAtLeastAcquire(Ordering);
+  return isAcquireOrStronger(Ordering);
 }]>;
 
 // An atomic load operation that does not need either acquire or release
 // semantics.
 class relaxed_load<PatFrag base>
   : PatFrag<(ops node:$ptr), (base node:$ptr), [{
   AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
-  return !isAtLeastAcquire(Ordering);
+  return !isAcquireOrStronger(Ordering);
 }]>;
 
 // 8-bit loads
@@ -112,15 +112,16 @@
 class releasing_store<PatFrag base>
   : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
   AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
-  assert(Ordering != AcquireRelease && "unexpected store ordering");
-  return isAtLeastRelease(Ordering);
+  assert(Ordering != AtomicOrdering::AcquireRelease &&
+         "unexpected store ordering");
+  return isReleaseOrStronger(Ordering);
 }]>;
 
 // An atomic store operation that doesn't actually need to be atomic on AArch64.
 class relaxed_store<PatFrag base>
   : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
   AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
-  return !isAtLeastRelease(Ordering);
+  return !isReleaseOrStronger(Ordering);
 }]>;
 
 // 8-bit stores
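The .td predicates above split every atomic access between the acquire/release
flavour (ldar/stlr) and the plain flavour, and they stay exhaustive because
each relaxed predicate is the exact negation of its acquiring/releasing
counterpart. A tiny illustrative classifier under that assumption; the helper
names are hypothetical, not patch code:

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Every ordering selects exactly one of the two load patterns.
bool needsLDAR(AtomicOrdering O) { return isAcquireOrStronger(O); }
bool usesPlainLDR(AtomicOrdering O) { return !isAcquireOrStronger(O); }
// Unordered/Monotonic -> plain LDR; Acquire, SequentiallyConsistent -> LDAR.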
Index: llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
+++ llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
@@ -3011,7 +3011,7 @@
   if (Subtarget->isMClass()) {
     // Only a full system barrier exists in the M-class architectures.
     Domain = ARM_MB::SY;
-  } else if (Subtarget->isSwift() && Ord == Release) {
+  } else if (Subtarget->isSwift() && Ord == AtomicOrdering::Release) {
     // Swift happens to implement ISHST barriers in a way that's compatible with
     // Release semantics but weaker than ISH so we'd be fools not to use
     // it. Beware: other processors probably don't!
@@ -6932,13 +6932,13 @@
 }
 
 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
-  // Monotonic load/store is legal for all targets
-  if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
-    return Op;
+  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
+    // Acquire/Release load/store is not legal for targets without a dmb or
+    // equivalent available.
+    return SDValue();
 
-  // Acquire/Release load/store is not legal for targets without a
-  // dmb or equivalent available.
-  return SDValue();
+  // Monotonic load/store is legal for all targets.
+  return Op;
 }
 
 static void ReplaceREADCYCLECOUNTER(SDNode *N,
@@ -12076,18 +12076,18 @@
                                          AtomicOrdering Ord, bool IsStore,
                                          bool IsLoad) const {
   switch (Ord) {
-  case NotAtomic:
-  case Unordered:
+  case AtomicOrdering::NotAtomic:
+  case AtomicOrdering::Unordered:
     llvm_unreachable("Invalid fence: unordered/non-atomic");
-  case Monotonic:
-  case Acquire:
+  case AtomicOrdering::Monotonic:
+  case AtomicOrdering::Acquire:
     return nullptr; // Nothing to do
-  case SequentiallyConsistent:
+  case AtomicOrdering::SequentiallyConsistent:
     if (!IsStore)
       return nullptr; // Nothing to do
     /*FALLTHROUGH*/
-  case Release:
-  case AcquireRelease:
+  case AtomicOrdering::Release:
+  case AtomicOrdering::AcquireRelease:
     if (Subtarget->isSwift())
       return makeDMB(Builder, ARM_MB::ISHST);
     // FIXME: add a comment with a link to documentation justifying this.
@@ -12101,15 +12101,15 @@
                                           AtomicOrdering Ord, bool IsStore,
                                           bool IsLoad) const {
   switch (Ord) {
-  case NotAtomic:
-  case Unordered:
+  case AtomicOrdering::NotAtomic:
+  case AtomicOrdering::Unordered:
     llvm_unreachable("Invalid fence: unordered/not-atomic");
-  case Monotonic:
-  case Release:
+  case AtomicOrdering::Monotonic:
+  case AtomicOrdering::Release:
     return nullptr; // Nothing to do
-  case Acquire:
-  case AcquireRelease:
-  case SequentiallyConsistent:
+  case AtomicOrdering::Acquire:
+  case AtomicOrdering::AcquireRelease:
+  case AtomicOrdering::SequentiallyConsistent:
     return makeDMB(Builder, ARM_MB::ISH);
   }
   llvm_unreachable("Unknown fence ordering in emitTrailingFence");
@@ -12204,7 +12204,7 @@
                                          AtomicOrdering Ord) const {
   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
   Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
-  bool IsAcquire = isAtLeastAcquire(Ord);
+  bool IsAcquire = isAcquireOrStronger(Ord);
 
   // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
   // intrinsic must return {i32, i32} and we have to recombine them into a
@@ -12248,7 +12248,7 @@
                                                Value *Addr,
                                                AtomicOrdering Ord) const {
   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
-  bool IsRelease = isAtLeastRelease(Ord);
+  bool IsRelease = isReleaseOrStronger(Ord);
 
   // Since the intrinsics must have legal type, the i64 intrinsics take two
   // parameters: "i32, i32". We must marshal Val into the appropriate form
Index: llvm/trunk/lib/Target/ARM/ARMInstrInfo.td
===================================================================
--- llvm/trunk/lib/Target/ARM/ARMInstrInfo.td
+++ llvm/trunk/lib/Target/ARM/ARMInstrInfo.td
@@ -4761,7 +4761,7 @@
 class acquiring_load<PatFrag base>
   : PatFrag<(ops node:$ptr), (base node:$ptr), [{
   AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
-  return isAtLeastAcquire(Ordering);
+  return isAcquireOrStronger(Ordering);
 }]>;
 
 def atomic_load_acquire_8 : acquiring_load<atomic_load_8>;
@@ -4771,7 +4771,7 @@
 class releasing_store<PatFrag base>
   : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
   AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
-  return isAtLeastRelease(Ordering);
+  return isReleaseOrStronger(Ordering);
 }]>;
 
 def atomic_store_release_8 : releasing_store<atomic_store_8>;
Index: llvm/trunk/lib/Target/CppBackend/CPPBackend.cpp
===================================================================
--- llvm/trunk/lib/Target/CppBackend/CPPBackend.cpp
+++ llvm/trunk/lib/Target/CppBackend/CPPBackend.cpp
@@ -1091,13 +1091,14 @@
 
 static StringRef ConvertAtomicOrdering(AtomicOrdering Ordering) {
   switch (Ordering) {
-    case NotAtomic: return "NotAtomic";
-    case Unordered: return "Unordered";
-    case Monotonic: return "Monotonic";
-    case Acquire: return "Acquire";
-    case Release: return "Release";
-    case AcquireRelease: return "AcquireRelease";
-    case SequentiallyConsistent: return "SequentiallyConsistent";
+    case AtomicOrdering::NotAtomic: return "NotAtomic";
+    case AtomicOrdering::Unordered: return "Unordered";
+    case AtomicOrdering::Monotonic: return "Monotonic";
+    case AtomicOrdering::Acquire: return "Acquire";
+    case AtomicOrdering::Release: return "Release";
+    case AtomicOrdering::AcquireRelease: return "AcquireRelease";
+    case AtomicOrdering::SequentiallyConsistent:
+      return "SequentiallyConsistent";
   }
   llvm_unreachable("Unknown ordering");
 }
Index: llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp
+++ llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -8323,9 +8323,9 @@
 Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                  AtomicOrdering Ord, bool IsStore,
                                                  bool IsLoad) const {
-  if (Ord == SequentiallyConsistent)
+  if (Ord == AtomicOrdering::SequentiallyConsistent)
     return callIntrinsic(Builder, Intrinsic::ppc_sync);
-  if (isAtLeastRelease(Ord))
+  if (isReleaseOrStronger(Ord))
     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
   return nullptr;
 }
@@ -8333,7 +8333,7 @@
 Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                   AtomicOrdering Ord, bool IsStore,
                                                   bool IsLoad) const {
-  if (IsLoad && isAtLeastAcquire(Ord))
+  if (IsLoad && isAcquireOrStronger(Ord))
     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
   // FIXME: this is too conservative, a dependent branch + isync is enough.
   // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
Index: llvm/trunk/lib/Target/Sparc/SparcISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/Sparc/SparcISelLowering.cpp
+++ llvm/trunk/lib/Target/Sparc/SparcISelLowering.cpp
@@ -2929,12 +2929,12 @@
 }
 
 static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
-  // Monotonic load/stores are legal.
-  if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
-    return Op;
-
-  // Otherwise, expand with a fence.
+  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
+    // Expand with a fence.
     return SDValue();
+
+  // Monotonic load/stores are legal.
+  return Op;
 }
 
 SDValue SparcTargetLowering::
Index: llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -3130,9 +3130,11 @@
 
   // The only fence that needs an instruction is a sequentially-consistent
   // cross-thread fence.
-  if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
+  if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
+      FenceScope == CrossThread) {
     return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
-                                      Op.getOperand(0)), 0);
+                                      Op.getOperand(0)),
+                   0);
   }
 
   // MEMBARRIER is a compiler barrier; it codegens to a no-op.
Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
@@ -20464,7 +20464,7 @@
   // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
   // lowered to just a load without a fence. A mfence flushes the store buffer,
   // making the optimization clearly correct.
-  // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
+  // FIXME: it is required if isReleaseOrStronger(Order) but it is not clear
   // otherwise, we might be able to be more aggressive on relaxed idempotent
   // rmw. In practice, they do not look useful, so we don't try to be
   // especially clever.
@@ -20503,7 +20503,8 @@
 
   // The only fence that needs an instruction is a sequentially-consistent
   // cross-thread fence.
-  if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
+  if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
+      FenceScope == CrossThread) {
     if (Subtarget.hasMFence())
       return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
 
@@ -20986,7 +20987,8 @@
   // FIXME: On 32-bit, store -> fist or movq would be more efficient
   //        (The only way to get a 16-byte store is cmpxchg16b)
   // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
-  if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
+  if (cast<AtomicSDNode>(Node)->getOrdering() ==
+          AtomicOrdering::SequentiallyConsistent ||
       !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
     SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
                                  cast<AtomicSDNode>(Node)->getMemoryVT(),
Index: llvm/trunk/lib/Target/XCore/XCoreISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/XCore/XCoreISelLowering.cpp
+++ llvm/trunk/lib/Target/XCore/XCoreISelLowering.cpp
@@ -970,8 +970,9 @@
 LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
   AtomicSDNode *N = cast<AtomicSDNode>(Op);
   assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
-  assert(N->getOrdering() <= Monotonic &&
-         "setInsertFencesForAtomic(true) and yet greater than Monotonic");
+  assert((N->getOrdering() == AtomicOrdering::Unordered ||
+          N->getOrdering() == AtomicOrdering::Monotonic) &&
+         "setInsertFencesForAtomic(true) expects unordered / monotonic");
   if (N->getMemoryVT() == MVT::i32) {
     if (N->getAlignment() < 4)
       report_fatal_error("atomic load must be aligned");
@@ -1000,8 +1001,9 @@
 LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
   AtomicSDNode *N = cast<AtomicSDNode>(Op);
   assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
-  assert(N->getOrdering() <= Monotonic &&
-         "setInsertFencesForAtomic(true) and yet greater than Monotonic");
+  assert((N->getOrdering() == AtomicOrdering::Unordered ||
+          N->getOrdering() == AtomicOrdering::Monotonic) &&
+         "setInsertFencesForAtomic(true) expects unordered / monotonic");
   if (N->getMemoryVT() == MVT::i32) {
     if (N->getAlignment() < 4)
       report_fatal_error("atomic store must be aligned");
Index: llvm/trunk/lib/Transforms/IPO/GlobalOpt.cpp
===================================================================
--- llvm/trunk/lib/Transforms/IPO/GlobalOpt.cpp
+++ llvm/trunk/lib/Transforms/IPO/GlobalOpt.cpp
@@ -1503,7 +1503,7 @@
   // into multiple malloc'd arrays, one for each field. This is basically
   // SRoA for malloc'd memory.
 
-  if (Ordering != NotAtomic)
+  if (Ordering != AtomicOrdering::NotAtomic)
     return false;
 
   // If this is an allocation of a fixed size array of structs, analyze as a
@@ -1982,7 +1982,7 @@
 
   // Otherwise, if the global was not a boolean, we can shrink it to be a
   // boolean.
if (Constant *SOVConstant = dyn_cast(GS.StoredOnceValue)) { - if (GS.Ordering == NotAtomic) { + if (GS.Ordering == AtomicOrdering::NotAtomic) { if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) { ++NumShrunkToBool; return true; @@ -2581,4 +2581,3 @@ return Changed; } - Index: llvm/trunk/lib/Transforms/IPO/MergeFunctions.cpp =================================================================== --- llvm/trunk/lib/Transforms/IPO/MergeFunctions.cpp +++ llvm/trunk/lib/Transforms/IPO/MergeFunctions.cpp @@ -401,6 +401,7 @@ int cmpTypes(Type *TyL, Type *TyR) const; int cmpNumbers(uint64_t L, uint64_t R) const; + int cmpOrderings(AtomicOrdering L, AtomicOrdering R) const; int cmpAPInts(const APInt &L, const APInt &R) const; int cmpAPFloats(const APFloat &L, const APFloat &R) const; int cmpInlineAsm(const InlineAsm *L, const InlineAsm *R) const; @@ -477,6 +478,12 @@ return 0; } +int FunctionComparator::cmpOrderings(AtomicOrdering L, AtomicOrdering R) const { + if ((int)L < (int)R) return -1; + if ((int)L > (int)R) return 1; + return 0; +} + int FunctionComparator::cmpAPInts(const APInt &L, const APInt &R) const { if (int Res = cmpNumbers(L.getBitWidth(), R.getBitWidth())) return Res; @@ -939,7 +946,7 @@ cmpNumbers(LI->getAlignment(), cast(R)->getAlignment())) return Res; if (int Res = - cmpNumbers(LI->getOrdering(), cast(R)->getOrdering())) + cmpOrderings(LI->getOrdering(), cast(R)->getOrdering())) return Res; if (int Res = cmpNumbers(LI->getSynchScope(), cast(R)->getSynchScope())) @@ -955,7 +962,7 @@ cmpNumbers(SI->getAlignment(), cast(R)->getAlignment())) return Res; if (int Res = - cmpNumbers(SI->getOrdering(), cast(R)->getOrdering())) + cmpOrderings(SI->getOrdering(), cast(R)->getOrdering())) return Res; return cmpNumbers(SI->getSynchScope(), cast(R)->getSynchScope()); } @@ -1009,7 +1016,7 @@ } if (const FenceInst *FI = dyn_cast(L)) { if (int Res = - cmpNumbers(FI->getOrdering(), cast(R)->getOrdering())) + cmpOrderings(FI->getOrdering(), cast(R)->getOrdering())) return Res; return cmpNumbers(FI->getSynchScope(), cast(R)->getSynchScope()); } @@ -1021,11 +1028,13 @@ if (int Res = cmpNumbers(CXI->isWeak(), cast(R)->isWeak())) return Res; - if (int Res = cmpNumbers(CXI->getSuccessOrdering(), - cast(R)->getSuccessOrdering())) + if (int Res = + cmpOrderings(CXI->getSuccessOrdering(), + cast(R)->getSuccessOrdering())) return Res; - if (int Res = cmpNumbers(CXI->getFailureOrdering(), - cast(R)->getFailureOrdering())) + if (int Res = + cmpOrderings(CXI->getFailureOrdering(), + cast(R)->getFailureOrdering())) return Res; return cmpNumbers(CXI->getSynchScope(), cast(R)->getSynchScope()); @@ -1037,7 +1046,7 @@ if (int Res = cmpNumbers(RMWI->isVolatile(), cast(R)->isVolatile())) return Res; - if (int Res = cmpNumbers(RMWI->getOrdering(), + if (int Res = cmpOrderings(RMWI->getOrdering(), cast(R)->getOrdering())) return Res; return cmpNumbers(RMWI->getSynchScope(), Index: llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp =================================================================== --- llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -1222,34 +1222,34 @@ AtomicOrdering addReleaseOrdering(AtomicOrdering a) { switch (a) { - case NotAtomic: - return NotAtomic; - case Unordered: - case Monotonic: - case Release: - return Release; - case Acquire: - case AcquireRelease: - return AcquireRelease; - case SequentiallyConsistent: - return SequentiallyConsistent; + case AtomicOrdering::NotAtomic: + return 
Index: llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1222,34 +1222,34 @@
   AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
     switch (a) {
-    case NotAtomic:
-      return NotAtomic;
-    case Unordered:
-    case Monotonic:
-    case Release:
-      return Release;
-    case Acquire:
-    case AcquireRelease:
-      return AcquireRelease;
-    case SequentiallyConsistent:
-      return SequentiallyConsistent;
+    case AtomicOrdering::NotAtomic:
+      return AtomicOrdering::NotAtomic;
+    case AtomicOrdering::Unordered:
+    case AtomicOrdering::Monotonic:
+    case AtomicOrdering::Release:
+      return AtomicOrdering::Release;
+    case AtomicOrdering::Acquire:
+    case AtomicOrdering::AcquireRelease:
+      return AtomicOrdering::AcquireRelease;
+    case AtomicOrdering::SequentiallyConsistent:
+      return AtomicOrdering::SequentiallyConsistent;
     }
     llvm_unreachable("Unknown ordering");
   }
 
   AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
     switch (a) {
-    case NotAtomic:
-      return NotAtomic;
-    case Unordered:
-    case Monotonic:
-    case Acquire:
-      return Acquire;
-    case Release:
-    case AcquireRelease:
-      return AcquireRelease;
-    case SequentiallyConsistent:
-      return SequentiallyConsistent;
+    case AtomicOrdering::NotAtomic:
+      return AtomicOrdering::NotAtomic;
+    case AtomicOrdering::Unordered:
+    case AtomicOrdering::Monotonic:
+    case AtomicOrdering::Acquire:
+      return AtomicOrdering::Acquire;
+    case AtomicOrdering::Release:
+    case AtomicOrdering::AcquireRelease:
+      return AtomicOrdering::AcquireRelease;
+    case AtomicOrdering::SequentiallyConsistent:
+      return AtomicOrdering::SequentiallyConsistent;
     }
     llvm_unreachable("Unknown ordering");
   }
Index: llvm/trunk/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ llvm/trunk/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -551,7 +551,7 @@
     IRB.CreateCall(SanCovWithCheckFunction, GuardP);
   } else {
     LoadInst *Load = IRB.CreateLoad(GuardP);
-    Load->setAtomic(Monotonic);
+    Load->setAtomic(AtomicOrdering::Monotonic);
     Load->setAlignment(4);
     SetNoSanitizeMetadata(Load);
     Value *Cmp =
Index: llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -480,14 +480,16 @@
 static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
   uint32_t v = 0;
   switch (ord) {
-    case NotAtomic: llvm_unreachable("unexpected atomic ordering!");
-    case Unordered: // Fall-through.
-    case Monotonic: v = 0; break;
-    // case Consume: v = 1; break; // Not specified yet.
-    case Acquire: v = 2; break;
-    case Release: v = 3; break;
-    case AcquireRelease: v = 4; break;
-    case SequentiallyConsistent: v = 5; break;
+    case AtomicOrdering::NotAtomic:
+      llvm_unreachable("unexpected atomic ordering!");
+    case AtomicOrdering::Unordered: // Fall-through.
+    case AtomicOrdering::Monotonic: v = 0; break;
+    // Not specified yet:
+    // case AtomicOrdering::Consume: v = 1; break;
+    case AtomicOrdering::Acquire: v = 2; break;
+    case AtomicOrdering::Release: v = 3; break;
+    case AtomicOrdering::AcquireRelease: v = 4; break;
+    case AtomicOrdering::SequentiallyConsistent: v = 5; break;
   }
   return IRB->getInt32(v);
 }
Index: llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp
+++ llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -673,7 +673,7 @@
     // to advance the generation. We do need to prevent DSE across the fence,
     // but that's handled above.
     if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
-      if (FI->getOrdering() == Release) {
+      if (FI->getOrdering() == AtomicOrdering::Release) {
         assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
         continue;
       }
Index: llvm/trunk/lib/Transforms/Scalar/LowerAtomic.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Scalar/LowerAtomic.cpp
+++ llvm/trunk/lib/Transforms/Scalar/LowerAtomic.cpp
@@ -100,12 +100,12 @@
 }
 
 static bool LowerLoadInst(LoadInst *LI) {
-  LI->setAtomic(NotAtomic);
+  LI->setAtomic(AtomicOrdering::NotAtomic);
   return true;
 }
 
 static bool LowerStoreInst(StoreInst *SI) {
-  SI->setAtomic(NotAtomic);
+  SI->setAtomic(AtomicOrdering::NotAtomic);
   return true;
 }
Index: llvm/trunk/lib/Transforms/Utils/GlobalStatus.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Utils/GlobalStatus.cpp
+++ llvm/trunk/lib/Transforms/Utils/GlobalStatus.cpp
@@ -20,11 +20,11 @@
 /// and release, then return AcquireRelease.
 ///
 static AtomicOrdering strongerOrdering(AtomicOrdering X, AtomicOrdering Y) {
-  if (X == Acquire && Y == Release)
-    return AcquireRelease;
-  if (Y == Acquire && X == Release)
-    return AcquireRelease;
-  return (AtomicOrdering)std::max(X, Y);
+  if (X == AtomicOrdering::Acquire && Y == AtomicOrdering::Release)
+    return AtomicOrdering::AcquireRelease;
+  if (Y == AtomicOrdering::Acquire && X == AtomicOrdering::Release)
+    return AtomicOrdering::AcquireRelease;
+  return (AtomicOrdering)std::max((unsigned)X, (unsigned)Y);
 }
 
 /// It is safe to destroy a constant iff it is only used by constants itself.
@@ -185,4 +185,4 @@
       : IsCompared(false), IsLoaded(false), StoredType(NotStored),
         StoredOnceValue(nullptr), AccessingFunction(nullptr),
         HasMultipleAccessingFunctions(false), HasNonInstructionUser(false),
-        Ordering(NotAtomic) {}
+        Ordering(AtomicOrdering::NotAtomic) {}
Index: llvm/trunk/unittests/Analysis/AliasAnalysisTest.cpp
===================================================================
--- llvm/trunk/unittests/Analysis/AliasAnalysisTest.cpp
+++ llvm/trunk/unittests/Analysis/AliasAnalysisTest.cpp
@@ -179,12 +179,12 @@
   auto *Load1 = new LoadInst(Addr, "load", BB);
   auto *Add1 = BinaryOperator::CreateAdd(Value, Value, "add", BB);
   auto *VAArg1 = new VAArgInst(Addr, PtrType, "vaarg", BB);
-  auto *CmpXChg1 = new AtomicCmpXchgInst(Addr, ConstantInt::get(IntType, 0),
-                                         ConstantInt::get(IntType, 1),
-                                         Monotonic, Monotonic, CrossThread, BB);
+  auto *CmpXChg1 = new AtomicCmpXchgInst(
+      Addr, ConstantInt::get(IntType, 0), ConstantInt::get(IntType, 1),
+      AtomicOrdering::Monotonic, AtomicOrdering::Monotonic, CrossThread, BB);
   auto *AtomicRMW =
       new AtomicRMWInst(AtomicRMWInst::Xchg, Addr, ConstantInt::get(IntType, 1),
-                        Monotonic, CrossThread, BB);
+                        AtomicOrdering::Monotonic, CrossThread, BB);
   ReturnInst::Create(C, nullptr, BB);
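One lattice subtlety worth spelling out: acquire and release are incomparable, so `strongerOrdering` in the GlobalStatus.cpp hunk above cannot simply take the numerically larger value, which would wrongly collapse an acquire/release pair to release. A standalone sketch, not part of the patch (abbreviated enum and hypothetical `main`), of that behavior:

// Standalone sketch: merging incomparable orderings. Acquire and release
// sit on different branches of the lattice, so their merge is acq_rel,
// not the numerically larger of the two.
#include <algorithm>
#include <cassert>

enum class AtomicOrdering {
  NotAtomic = 0, Unordered = 1, Monotonic = 2, Acquire = 4,
  Release = 5, AcquireRelease = 6, SequentiallyConsistent = 7
};

static AtomicOrdering strongerOrdering(AtomicOrdering X, AtomicOrdering Y) {
  if (X == AtomicOrdering::Acquire && Y == AtomicOrdering::Release)
    return AtomicOrdering::AcquireRelease;
  if (Y == AtomicOrdering::Acquire && X == AtomicOrdering::Release)
    return AtomicOrdering::AcquireRelease;
  // Everywhere else the numeric order happens to agree with "stronger";
  // routing std::max through unsigned keeps the integral comparison on a
  // scoped enum explicit, matching the patch's style.
  return (AtomicOrdering)std::max((unsigned)X, (unsigned)Y);
}

int main() {
  assert(strongerOrdering(AtomicOrdering::Acquire, AtomicOrdering::Release) ==
         AtomicOrdering::AcquireRelease);
  assert(strongerOrdering(AtomicOrdering::Monotonic,
                          AtomicOrdering::SequentiallyConsistent) ==
         AtomicOrdering::SequentiallyConsistent);
  return 0;
}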