Index: include/llvm/Bitcode/LLVMBitCodes.h =================================================================== --- include/llvm/Bitcode/LLVMBitCodes.h +++ include/llvm/Bitcode/LLVMBitCodes.h @@ -369,12 +369,6 @@ ORDERING_SEQCST = 6 }; -/// Encoded SynchronizationScope values. -enum AtomicSynchScopeCodes { - SYNCHSCOPE_SINGLETHREAD = 0, - SYNCHSCOPE_CROSSTHREAD = 1 -}; - /// Markers and flags for call instruction. enum CallMarkersFlags { CALL_TAIL = 0, Index: include/llvm/CodeGen/SelectionDAG.h =================================================================== --- include/llvm/CodeGen/SelectionDAG.h +++ include/llvm/CodeGen/SelectionDAG.h @@ -856,29 +856,29 @@ SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo, unsigned Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, - SynchronizationScope SynchScope); + unsigned SynchScope); SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, MachineMemOperand *MMO, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, - SynchronizationScope SynchScope); + unsigned SynchScope); /// Gets a node for an atomic op, produces result (if relevant) /// and chain and takes 2 operands. SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, const Value *PtrVal, unsigned Alignment, AtomicOrdering Ordering, - SynchronizationScope SynchScope); + unsigned SynchScope); SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO, - AtomicOrdering Ordering, SynchronizationScope SynchScope); + AtomicOrdering Ordering, unsigned SynchScope); /// Gets a node for an atomic op, produces result and chain and /// takes 1 operand. SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, EVT VT, SDValue Chain, SDValue Ptr, MachineMemOperand *MMO, - AtomicOrdering Ordering, SynchronizationScope SynchScope); + AtomicOrdering Ordering, unsigned SynchScope); /// Gets a node for an atomic op, produces result and chain and takes N /// operands. @@ -886,11 +886,11 @@ SDVTList VTList, ArrayRef Ops, MachineMemOperand *MMO, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, - SynchronizationScope SynchScope); + unsigned SynchScope); SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTList, ArrayRef Ops, MachineMemOperand *MMO, AtomicOrdering Ordering, - SynchronizationScope SynchScope); + unsigned SynchScope); /// Creates a MemIntrinsicNode that may produce a /// result and takes a list of operands. Opcode may be INTRINSIC_VOID, Index: include/llvm/CodeGen/SelectionDAGNodes.h =================================================================== --- include/llvm/CodeGen/SelectionDAGNodes.h +++ include/llvm/CodeGen/SelectionDAGNodes.h @@ -1016,6 +1016,8 @@ protected: /// Memory reference information. MachineMemOperand *MMO; + /// \brief Synchronization scope of this memory operation. + unsigned SynchScope; public: MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs, @@ -1048,9 +1050,9 @@ AtomicOrdering getOrdering() const { return AtomicOrdering((SubclassData >> 8) & 15); } - SynchronizationScope getSynchScope() const { - return SynchronizationScope((SubclassData >> 12) & 1); - } + + /// \brief Returns synchronization scope of this memory operation. + unsigned getSynchScope() const { return this->SynchScope; } // Returns the offset from the location of the access. 
int64_t getSrcValueOffset() const { return MMO->getOffset(); } @@ -1129,7 +1131,7 @@ void InitAtomic(AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, - SynchronizationScope SynchScope) { + unsigned SynchScope) { // This must match encodeMemSDNodeFlags() in SelectionDAG.cpp. assert((AtomicOrdering)((unsigned)SuccessOrdering & 15) == SuccessOrdering && @@ -1137,11 +1139,9 @@ assert((AtomicOrdering)((unsigned)FailureOrdering & 15) == FailureOrdering && "Ordering may not require more than 4 bits!"); - assert((SynchScope & 1) == SynchScope && - "SynchScope may not require more than 1 bit!"); SubclassData |= (unsigned)SuccessOrdering << 8; - SubclassData |= SynchScope << 12; this->FailureOrdering = FailureOrdering; + this->SynchScope = SynchScope; assert(getSuccessOrdering() == SuccessOrdering && "Ordering encoding error!"); assert(getFailureOrdering() == FailureOrdering && @@ -1153,7 +1153,7 @@ AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL, EVT MemVT, MachineMemOperand *MMO, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, - SynchronizationScope SynchScope) + unsigned SynchScope) : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) { InitAtomic(SuccessOrdering, FailureOrdering, SynchScope); } Index: include/llvm/IR/IRBuilder.h =================================================================== --- include/llvm/IR/IRBuilder.h +++ include/llvm/IR/IRBuilder.h @@ -1086,7 +1086,7 @@ return SI; } FenceInst *CreateFence(AtomicOrdering Ordering, - SynchronizationScope SynchScope = CrossThread, + unsigned SynchScope = SynchScope::System, const Twine &Name = "") { return Insert(new FenceInst(Context, Ordering, SynchScope), Name); } @@ -1094,13 +1094,13 @@ CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, - SynchronizationScope SynchScope = CrossThread) { + unsigned SynchScope = SynchScope::System) { return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering, FailureOrdering, SynchScope)); } AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val, AtomicOrdering Ordering, - SynchronizationScope SynchScope = CrossThread) { + unsigned SynchScope = SynchScope::System) { return Insert(new AtomicRMWInst(Op, Ptr, Val, Ordering, SynchScope)); } Value *CreateGEP(Value *Ptr, ArrayRef IdxList, Index: include/llvm/IR/Instructions.h =================================================================== --- include/llvm/IR/Instructions.h +++ include/llvm/IR/Instructions.h @@ -37,10 +37,16 @@ class DataLayout; class LLVMContext; -enum SynchronizationScope { - SingleThread = 0, - CrossThread = 1 +/// \brief Predefined synchronization scopes. Each target can have additional, +/// target-specific synchronization scopes. +namespace SynchScope { +enum PredefinedSynchScope : unsigned { + /// Synchronized with respect to signal handlers executing in the same thread. + SingleThread = ~0U, + /// Synchronized with respect to all concurrently executing threads. 
+ System = 0 }; +} // End SynchScope namespace //===----------------------------------------------------------------------===// // AllocaInst Class @@ -189,17 +195,17 @@ LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align, BasicBlock *InsertAtEnd); LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align, - AtomicOrdering Order, SynchronizationScope SynchScope = CrossThread, + AtomicOrdering Order, unsigned SynchScope = SynchScope::System, Instruction *InsertBefore = nullptr) : LoadInst(cast(Ptr->getType())->getElementType(), Ptr, NameStr, isVolatile, Align, Order, SynchScope, InsertBefore) {} LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align, AtomicOrdering Order, - SynchronizationScope SynchScope = CrossThread, + unsigned SynchScope = SynchScope::System, Instruction *InsertBefore = nullptr); LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align, AtomicOrdering Order, - SynchronizationScope SynchScope, + unsigned SynchScope, BasicBlock *InsertAtEnd); LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore); @@ -246,20 +252,19 @@ ((unsigned)Ordering << 7)); } - SynchronizationScope getSynchScope() const { - return SynchronizationScope((getSubclassDataFromInstruction() >> 6) & 1); + /// \brief Returns synchronization scope of this instruction. + unsigned getSynchScope() const { + return this->SynchScope; } - /// Specify whether this load is ordered with respect to all - /// concurrently executing threads, or only with respect to signal handlers - /// executing in the same thread. - void setSynchScope(SynchronizationScope xthread) { - setInstructionSubclassData((getSubclassDataFromInstruction() & ~(1 << 6)) | - (xthread << 6)); + /// \brief Sets synchronization scope of this instruction. + void setSynchScope(unsigned SynchScope) { + this->SynchScope = SynchScope; } + /// \brief Sets atomic ordering and synchronization scope of this instruction. void setAtomic(AtomicOrdering Ordering, - SynchronizationScope SynchScope = CrossThread) { + unsigned SynchScope = SynchScope::System) { setOrdering(Ordering); setSynchScope(SynchScope); } @@ -294,6 +299,9 @@ void setInstructionSubclassData(unsigned short D) { Instruction::setInstructionSubclassData(D); } + + /// \brief Synchronization scope of this instruction. + unsigned SynchScope; }; //===----------------------------------------------------------------------===// @@ -327,11 +335,11 @@ unsigned Align, BasicBlock *InsertAtEnd); StoreInst(Value *Val, Value *Ptr, bool isVolatile, unsigned Align, AtomicOrdering Order, - SynchronizationScope SynchScope = CrossThread, + unsigned SynchScope = SynchScope::System, Instruction *InsertBefore = nullptr); StoreInst(Value *Val, Value *Ptr, bool isVolatile, unsigned Align, AtomicOrdering Order, - SynchronizationScope SynchScope, + unsigned SynchScope, BasicBlock *InsertAtEnd); /// isVolatile - Return true if this is a store to a volatile memory @@ -369,20 +377,19 @@ ((unsigned)Ordering << 7)); } - SynchronizationScope getSynchScope() const { - return SynchronizationScope((getSubclassDataFromInstruction() >> 6) & 1); + /// \brief Returns synchronization scope of this instruction. + unsigned getSynchScope() const { + return this->SynchScope; } - /// Specify whether this store instruction is ordered with respect to all - /// concurrently executing threads, or only with respect to signal handlers - /// executing in the same thread. 
- void setSynchScope(SynchronizationScope xthread) { - setInstructionSubclassData((getSubclassDataFromInstruction() & ~(1 << 6)) | - (xthread << 6)); + /// \brief Sets synchronization scope of this instruction. + void setSynchScope(unsigned SynchScope) { + this->SynchScope = SynchScope; } + /// \brief Sets atomic ordering and synchronization scope of this instruction. void setAtomic(AtomicOrdering Ordering, - SynchronizationScope SynchScope = CrossThread) { + unsigned SynchScope = SynchScope::System) { setOrdering(Ordering); setSynchScope(SynchScope); } @@ -420,6 +427,9 @@ void setInstructionSubclassData(unsigned short D) { Instruction::setInstructionSubclassData(D); } + + /// \brief Synchronization scope of this instruction. + unsigned SynchScope; }; template <> @@ -436,7 +446,7 @@ /// class FenceInst : public Instruction { void *operator new(size_t, unsigned) = delete; - void Init(AtomicOrdering Ordering, SynchronizationScope SynchScope); + void Init(AtomicOrdering Ordering, unsigned SynchScope); protected: // Note: Instruction needs to be a friend here to call cloneImpl. @@ -452,10 +462,10 @@ // Ordering may only be Acquire, Release, AcquireRelease, or // SequentiallyConsistent. FenceInst(LLVMContext &C, AtomicOrdering Ordering, - SynchronizationScope SynchScope = CrossThread, + unsigned SynchScope = SynchScope::System, Instruction *InsertBefore = nullptr); FenceInst(LLVMContext &C, AtomicOrdering Ordering, - SynchronizationScope SynchScope, + unsigned SynchScope, BasicBlock *InsertAtEnd); /// Returns the ordering effect of this fence. @@ -470,16 +480,14 @@ ((unsigned)Ordering << 1)); } - SynchronizationScope getSynchScope() const { - return SynchronizationScope(getSubclassDataFromInstruction() & 1); + /// \brief Returns synchronization scope of this instruction. + unsigned getSynchScope() const { + return this->SynchScope; } - /// Specify whether this fence orders other operations with respect to all - /// concurrently executing threads, or only with respect to signal handlers - /// executing in the same thread. - void setSynchScope(SynchronizationScope xthread) { - setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) | - xthread); + /// \brief Sets synchronization scope of this instruction. + void setSynchScope(unsigned SynchScope) { + this->SynchScope = SynchScope; } // Methods for support type inquiry through isa, cast, and dyn_cast: @@ -496,6 +504,9 @@ void setInstructionSubclassData(unsigned short D) { Instruction::setInstructionSubclassData(D); } + + /// \brief Synchronization scope of this instruction. + unsigned SynchScope; }; //===----------------------------------------------------------------------===// @@ -510,7 +521,7 @@ void *operator new(size_t, unsigned) = delete; void Init(Value *Ptr, Value *Cmp, Value *NewVal, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, - SynchronizationScope SynchScope); + unsigned SynchScope); protected: // Note: Instruction needs to be a friend here to call cloneImpl. 
@@ -525,12 +536,12 @@ AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, - SynchronizationScope SynchScope, + unsigned SynchScope, Instruction *InsertBefore = nullptr); AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, - SynchronizationScope SynchScope, + unsigned SynchScope, BasicBlock *InsertAtEnd); /// isVolatile - Return true if this is a cmpxchg from a volatile memory @@ -575,12 +586,9 @@ ((unsigned)Ordering << 5)); } - /// Specify whether this cmpxchg is atomic and orders other operations with - /// respect to all concurrently executing threads, or only with respect to - /// signal handlers executing in the same thread. - void setSynchScope(SynchronizationScope SynchScope) { - setInstructionSubclassData((getSubclassDataFromInstruction() & ~2) | - (SynchScope << 1)); + /// \brief Sets synchronization scope of this instruction. + void setSynchScope(unsigned SynchScope) { + this->SynchScope = SynchScope; } /// Returns the ordering constraint on this cmpxchg. @@ -593,10 +601,9 @@ return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7); } - /// Returns whether this cmpxchg is atomic between threads or only within a - /// single thread. - SynchronizationScope getSynchScope() const { - return SynchronizationScope((getSubclassDataFromInstruction() & 2) >> 1); + /// \brief Returns synchronization scope of this instruction. + unsigned getSynchScope() const { + return this->SynchScope; } Value *getPointerOperand() { return getOperand(0); } @@ -651,6 +658,9 @@ void setInstructionSubclassData(unsigned short D) { Instruction::setInstructionSubclassData(D); } + + /// \brief Synchronization scope of this instruction. + unsigned SynchScope; }; template <> @@ -715,10 +725,10 @@ return User::operator new(s, 2); } AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, - AtomicOrdering Ordering, SynchronizationScope SynchScope, + AtomicOrdering Ordering, unsigned SynchScope, Instruction *InsertBefore = nullptr); AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, - AtomicOrdering Ordering, SynchronizationScope SynchScope, + AtomicOrdering Ordering, unsigned SynchScope, BasicBlock *InsertAtEnd); BinOp getOperation() const { @@ -755,12 +765,9 @@ ((unsigned)Ordering << 2)); } - /// Specify whether this RMW orders other operations with respect to all - /// concurrently executing threads, or only with respect to signal handlers - /// executing in the same thread. - void setSynchScope(SynchronizationScope SynchScope) { - setInstructionSubclassData((getSubclassDataFromInstruction() & ~2) | - (SynchScope << 1)); + /// \brief Sets synchronization scope of this instruction. + void setSynchScope(unsigned SynchScope) { + this->SynchScope = SynchScope; } /// Returns the ordering constraint on this RMW. @@ -768,10 +775,9 @@ return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7); } - /// Returns whether this RMW is atomic between threads or only within a - /// single thread. - SynchronizationScope getSynchScope() const { - return SynchronizationScope((getSubclassDataFromInstruction() & 2) >> 1); + /// \brief Returns synchronization scope of this instruction. 
+ unsigned getSynchScope() const { + return this->SynchScope; } Value *getPointerOperand() { return getOperand(0); } @@ -796,12 +802,16 @@ private: void Init(BinOp Operation, Value *Ptr, Value *Val, - AtomicOrdering Ordering, SynchronizationScope SynchScope); + AtomicOrdering Ordering, unsigned SynchScope); + // Shadow Instruction::setInstructionSubclassData with a private forwarding // method so that subclasses cannot accidentally use it. void setInstructionSubclassData(unsigned short D) { Instruction::setInstructionSubclassData(D); } + + /// \brief Synchronization scope of this instruction. + unsigned SynchScope; }; template <> Index: lib/AsmParser/LLLexer.cpp =================================================================== --- lib/AsmParser/LLLexer.cpp +++ lib/AsmParser/LLLexer.cpp @@ -547,6 +547,7 @@ KEYWORD(acq_rel); KEYWORD(seq_cst); KEYWORD(singlethread); + KEYWORD(synchscope); KEYWORD(nnan); KEYWORD(ninf); Index: lib/AsmParser/LLParser.h =================================================================== --- lib/AsmParser/LLParser.h +++ lib/AsmParser/LLParser.h @@ -237,8 +237,9 @@ bool ParseOptionalCallingConv(unsigned &CC); bool ParseOptionalAlignment(unsigned &Alignment); bool ParseOptionalDerefAttrBytes(lltok::Kind AttrKind, uint64_t &Bytes); - bool ParseScopeAndOrdering(bool isAtomic, SynchronizationScope &Scope, + bool ParseScopeAndOrdering(bool isAtomic, unsigned &Scope, AtomicOrdering &Ordering); + bool ParseScope(unsigned &Scope); bool ParseOrdering(AtomicOrdering &Ordering); bool ParseOptionalStackAlignment(unsigned &Alignment); bool ParseOptionalCommaAlign(unsigned &Alignment, bool &AteExtraComma); Index: lib/AsmParser/LLParser.cpp =================================================================== --- lib/AsmParser/LLParser.cpp +++ lib/AsmParser/LLParser.cpp @@ -1868,20 +1868,36 @@ } /// ParseScopeAndOrdering -/// if isAtomic: ::= 'singlethread'? AtomicOrdering -/// else: ::= +/// if isAtomic: +/// ::= 'singlethread' or 'synchscope' '(' uint32 ')'? AtomicOrdering +/// else +/// ::= /// /// This sets Scope and Ordering to the parsed values. -bool LLParser::ParseScopeAndOrdering(bool isAtomic, SynchronizationScope &Scope, +bool LLParser::ParseScopeAndOrdering(bool isAtomic, unsigned &Scope, AtomicOrdering &Ordering) { if (!isAtomic) return false; + return ParseScope(Scope) || ParseOrdering(Ordering); +} - Scope = CrossThread; +/// ParseScope +/// ::= /* empty */ +/// ::= 'singlethread' +/// ::= 'synchscope' '(' uint32 ')' +/// +/// This sets Scope to the parsed value. 
+bool LLParser::ParseScope(unsigned &Scope) { + if (EatIfPresent(lltok::kw_synchscope)) + return ParseToken(lltok::lparen, "expected '(' in synchscope") || + ParseUInt32(Scope) || + ParseToken(lltok::rparen, "expected ')' in synchscope"); + + Scope = SynchScope::System; if (EatIfPresent(lltok::kw_singlethread)) - Scope = SingleThread; + Scope = SynchScope::SingleThread; - return ParseOrdering(Ordering); + return false; } /// ParseOrdering @@ -6003,7 +6019,7 @@ bool AteExtraComma = false; bool isAtomic = false; AtomicOrdering Ordering = AtomicOrdering::NotAtomic; - SynchronizationScope Scope = CrossThread; + unsigned Scope = SynchScope::System; if (Lex.getKind() == lltok::kw_atomic) { isAtomic = true; @@ -6052,7 +6068,7 @@ bool AteExtraComma = false; bool isAtomic = false; AtomicOrdering Ordering = AtomicOrdering::NotAtomic; - SynchronizationScope Scope = CrossThread; + unsigned Scope = SynchScope::System; if (Lex.getKind() == lltok::kw_atomic) { isAtomic = true; @@ -6096,7 +6112,7 @@ bool AteExtraComma = false; AtomicOrdering SuccessOrdering = AtomicOrdering::NotAtomic; AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic; - SynchronizationScope Scope = CrossThread; + unsigned Scope = SynchScope::System; bool isVolatile = false; bool isWeak = false; @@ -6148,7 +6164,7 @@ Value *Ptr, *Val; LocTy PtrLoc, ValLoc; bool AteExtraComma = false; AtomicOrdering Ordering = AtomicOrdering::NotAtomic; - SynchronizationScope Scope = CrossThread; + unsigned Scope = SynchScope::System; bool isVolatile = false; AtomicRMWInst::BinOp Operation; @@ -6201,7 +6217,7 @@ /// ::= 'fence' 'singlethread'? AtomicOrdering int LLParser::ParseFence(Instruction *&Inst, PerFunctionState &PFS) { AtomicOrdering Ordering = AtomicOrdering::NotAtomic; - SynchronizationScope Scope = CrossThread; + unsigned Scope = SynchScope::System; if (ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering)) return true; Index: lib/AsmParser/LLToken.h =================================================================== --- lib/AsmParser/LLToken.h +++ lib/AsmParser/LLToken.h @@ -94,6 +94,8 @@ kw_acq_rel, kw_seq_cst, kw_singlethread, + kw_synchscope, + kw_nnan, kw_ninf, kw_nsz, Index: lib/Bitcode/Reader/BitcodeReader.cpp =================================================================== --- lib/Bitcode/Reader/BitcodeReader.cpp +++ lib/Bitcode/Reader/BitcodeReader.cpp @@ -858,12 +858,8 @@ } } -static SynchronizationScope getDecodedSynchScope(unsigned Val) { - switch (Val) { - case bitc::SYNCHSCOPE_SINGLETHREAD: return SingleThread; - default: // Map unknown scopes to cross-thread. 
- case bitc::SYNCHSCOPE_CROSSTHREAD: return CrossThread; - } +static unsigned getDecodedSynchScope(unsigned Val) { + return Val - 1; } static Comdat::SelectionKind getDecodedComdatSelectionKind(unsigned Val) { @@ -5317,7 +5313,7 @@ return error("Invalid record"); if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0) return error("Invalid record"); - SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]); + unsigned SynchScope = getDecodedSynchScope(Record[OpNum + 3]); unsigned Align; if (std::error_code EC = parseAlignmentValue(Record[OpNum], Align)) @@ -5373,7 +5369,7 @@ Ordering == AtomicOrdering::Acquire || Ordering == AtomicOrdering::AcquireRelease) return error("Invalid record"); - SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]); + unsigned SynchScope = getDecodedSynchScope(Record[OpNum + 3]); if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0) return error("Invalid record"); @@ -5403,7 +5399,7 @@ if (SuccessOrdering == AtomicOrdering::NotAtomic || SuccessOrdering == AtomicOrdering::Unordered) return error("Invalid record"); - SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 2]); + unsigned SynchScope = getDecodedSynchScope(Record[OpNum + 2]); if (std::error_code EC = typeCheckLoadStoreInst(Cmp->getType(), Ptr->getType())) @@ -5450,7 +5446,7 @@ if (Ordering == AtomicOrdering::NotAtomic || Ordering == AtomicOrdering::Unordered) return error("Invalid record"); - SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]); + unsigned SynchScope = getDecodedSynchScope(Record[OpNum + 3]); I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SynchScope); cast(I)->setVolatile(Record[OpNum+1]); InstructionList.push_back(I); @@ -5464,7 +5460,7 @@ Ordering == AtomicOrdering::Unordered || Ordering == AtomicOrdering::Monotonic) return error("Invalid record"); - SynchronizationScope SynchScope = getDecodedSynchScope(Record[1]); + unsigned SynchScope = getDecodedSynchScope(Record[1]); I = new FenceInst(Context, Ordering, SynchScope); InstructionList.push_back(I); break; Index: lib/Bitcode/Writer/BitcodeWriter.cpp =================================================================== --- lib/Bitcode/Writer/BitcodeWriter.cpp +++ lib/Bitcode/Writer/BitcodeWriter.cpp @@ -589,12 +589,8 @@ llvm_unreachable("Invalid ordering"); } -static unsigned getEncodedSynchScope(SynchronizationScope SynchScope) { - switch (SynchScope) { - case SingleThread: return bitc::SYNCHSCOPE_SINGLETHREAD; - case CrossThread: return bitc::SYNCHSCOPE_CROSSTHREAD; - } - llvm_unreachable("Invalid synch scope"); +static unsigned getEncodedSynchScope(unsigned SynchScope) { + return SynchScope + 1; } void ModuleBitcodeWriter::writeStringRecord(unsigned Code, StringRef Str, Index: lib/CodeGen/SelectionDAG/SelectionDAG.cpp =================================================================== --- lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -4799,7 +4799,7 @@ MachineMemOperand *MMO, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, - SynchronizationScope SynchScope) { + unsigned SynchScope) { FoldingSetNodeID ID; ID.AddInteger(MemVT.getRawBits()); AddNodeIDNode(ID, Opcode, VTList, Ops); @@ -4823,7 +4823,7 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTList, ArrayRef Ops, MachineMemOperand *MMO, AtomicOrdering Ordering, - SynchronizationScope SynchScope) { + unsigned SynchScope) { return getAtomic(Opcode, dl, MemVT, VTList, Ops, MMO, 
Ordering, Ordering, SynchScope); } @@ -4832,7 +4832,7 @@ unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo, unsigned Alignment, AtomicOrdering SuccessOrdering, - AtomicOrdering FailureOrdering, SynchronizationScope SynchScope) { + AtomicOrdering FailureOrdering, unsigned SynchScope) { assert(Opcode == ISD::ATOMIC_CMP_SWAP || Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); @@ -4859,7 +4859,7 @@ MachineMemOperand *MMO, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, - SynchronizationScope SynchScope) { + unsigned SynchScope) { assert(Opcode == ISD::ATOMIC_CMP_SWAP || Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); @@ -4873,7 +4873,7 @@ SDValue Chain, SDValue Ptr, SDValue Val, const Value *PtrVal, unsigned Alignment, AtomicOrdering Ordering, - SynchronizationScope SynchScope) { + unsigned SynchScope) { if (Alignment == 0) // Ensure that codegen never sees alignment 0 Alignment = getEVTAlignment(MemVT); @@ -4901,7 +4901,7 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO, AtomicOrdering Ordering, - SynchronizationScope SynchScope) { + unsigned SynchScope) { assert((Opcode == ISD::ATOMIC_LOAD_ADD || Opcode == ISD::ATOMIC_LOAD_SUB || Opcode == ISD::ATOMIC_LOAD_AND || @@ -4927,7 +4927,7 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, EVT VT, SDValue Chain, SDValue Ptr, MachineMemOperand *MMO, AtomicOrdering Ordering, - SynchronizationScope SynchScope) { + unsigned SynchScope) { assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op"); SDVTList VTs = getVTList(VT, MVT::Other); @@ -6702,7 +6702,8 @@ MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT memvt, MachineMemOperand *mmo) - : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) { + : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo), + SynchScope(SynchScope::System) { SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(), MMO->isNonTemporal(), MMO->isInvariant()); assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!"); Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp =================================================================== --- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -3916,7 +3916,7 @@ SDLoc dl = getCurSDLoc(); AtomicOrdering SuccessOrder = I.getSuccessOrdering(); AtomicOrdering FailureOrder = I.getFailureOrdering(); - SynchronizationScope Scope = I.getSynchScope(); + unsigned Scope = I.getSynchScope(); SDValue InChain = getRoot(); @@ -3952,7 +3952,7 @@ case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break; } AtomicOrdering Order = I.getOrdering(); - SynchronizationScope Scope = I.getSynchScope(); + unsigned Scope = I.getSynchScope(); SDValue InChain = getRoot(); @@ -3986,7 +3986,7 @@ void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) { SDLoc dl = getCurSDLoc(); AtomicOrdering Order = I.getOrdering(); - SynchronizationScope Scope = I.getSynchScope(); + unsigned Scope = I.getSynchScope(); SDValue InChain = getRoot(); @@ -4021,7 +4021,7 @@ SDLoc dl = getCurSDLoc(); AtomicOrdering Order = I.getOrdering(); - SynchronizationScope Scope = I.getSynchScope(); + unsigned Scope = 
I.getSynchScope(); SDValue InChain = getRoot(); Index: lib/IR/AsmWriter.cpp =================================================================== --- lib/IR/AsmWriter.cpp +++ lib/IR/AsmWriter.cpp @@ -2080,10 +2080,10 @@ void writeOperand(const Value *Op, bool PrintType); void writeParamOperand(const Value *Operand, AttributeSet Attrs,unsigned Idx); void writeOperandBundles(ImmutableCallSite CS); - void writeAtomic(AtomicOrdering Ordering, SynchronizationScope SynchScope); + void writeAtomic(AtomicOrdering Ordering, unsigned SynchScope); void writeAtomicCmpXchg(AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, - SynchronizationScope SynchScope); + unsigned SynchScope); void writeAllMDNodes(); void writeMDNode(unsigned Slot, const MDNode *Node); @@ -2145,13 +2145,19 @@ } void AssemblyWriter::writeAtomic(AtomicOrdering Ordering, - SynchronizationScope SynchScope) { + unsigned SynchScope) { if (Ordering == AtomicOrdering::NotAtomic) return; switch (SynchScope) { - case SingleThread: Out << " singlethread"; break; - case CrossThread: break; + case SynchScope::SingleThread: + Out << " singlethread"; + break; + case SynchScope::System: + break; + default: + Out << " synchscope(" << SynchScope << ")"; + break; } Out << " " << toIRString(Ordering); @@ -2159,13 +2165,19 @@ void AssemblyWriter::writeAtomicCmpXchg(AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, - SynchronizationScope SynchScope) { + unsigned SynchScope) { assert(SuccessOrdering != AtomicOrdering::NotAtomic && FailureOrdering != AtomicOrdering::NotAtomic); switch (SynchScope) { - case SingleThread: Out << " singlethread"; break; - case CrossThread: break; + case SynchScope::SingleThread: + Out << " singlethread"; + break; + case SynchScope::System: + break; + default: + Out << " synchscope(" << SynchScope << ")"; + break; } Out << " " << toIRString(SuccessOrdering); Index: lib/IR/Core.cpp =================================================================== --- lib/IR/Core.cpp +++ lib/IR/Core.cpp @@ -2796,10 +2796,8 @@ LLVMValueRef LLVMBuildFence(LLVMBuilderRef B, LLVMAtomicOrdering Ordering, LLVMBool isSingleThread, const char *Name) { - return wrap( - unwrap(B)->CreateFence(mapFromLLVMOrdering(Ordering), - isSingleThread ? SingleThread : CrossThread, - Name)); + return wrap(unwrap(B)->CreateFence(mapFromLLVMOrdering(Ordering), + isSingleThread ? SynchScope::SingleThread : SynchScope::System, Name)); } LLVMValueRef LLVMBuildGEP(LLVMBuilderRef B, LLVMValueRef Pointer, @@ -3080,7 +3078,8 @@ case LLVMAtomicRMWBinOpUMin: intop = AtomicRMWInst::UMin; break; } return wrap(unwrap(B)->CreateAtomicRMW(intop, unwrap(PTR), unwrap(Val), - mapFromLLVMOrdering(ordering), singleThread ? SingleThread : CrossThread)); + mapFromLLVMOrdering(ordering), + singleThread ? SynchScope::SingleThread : SynchScope::System)); } LLVMValueRef LLVMBuildAtomicCmpXchg(LLVMBuilderRef B, LLVMValueRef Ptr, @@ -3092,7 +3091,7 @@ return wrap(unwrap(B)->CreateAtomicCmpXchg(unwrap(Ptr), unwrap(Cmp), unwrap(New), mapFromLLVMOrdering(SuccessOrdering), mapFromLLVMOrdering(FailureOrdering), - singleThread ? SingleThread : CrossThread)); + singleThread ? 
SynchScope::SingleThread : SynchScope::System)); } @@ -3100,13 +3099,15 @@ Value *P = unwrap(AtomicInst); if (AtomicRMWInst *I = dyn_cast(P)) - return I->getSynchScope() == SingleThread; - return cast(P)->getSynchScope() == SingleThread; + return I->getSynchScope() == SynchScope::SingleThread; + return cast(P)->getSynchScope() == + SynchScope::SingleThread; } void LLVMSetAtomicSingleThread(LLVMValueRef AtomicInst, LLVMBool NewValue) { Value *P = unwrap(AtomicInst); - SynchronizationScope Sync = NewValue ? SingleThread : CrossThread; + unsigned Sync = + NewValue ? SynchScope::SingleThread : SynchScope::System; if (AtomicRMWInst *I = dyn_cast(P)) return I->setSynchScope(Sync); Index: lib/IR/Instructions.cpp =================================================================== --- lib/IR/Instructions.cpp +++ lib/IR/Instructions.cpp @@ -1340,16 +1340,16 @@ LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile, unsigned Align, Instruction *InsertBef) : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic, - CrossThread, InsertBef) {} + SynchScope::System, InsertBef) {} LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile, unsigned Align, BasicBlock *InsertAE) : LoadInst(Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic, - CrossThread, InsertAE) {} + SynchScope::System, InsertAE) {} LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile, unsigned Align, AtomicOrdering Order, - SynchronizationScope SynchScope, Instruction *InsertBef) + unsigned SynchScope, Instruction *InsertBef) : UnaryInstruction(Ty, Load, Ptr, InsertBef) { assert(Ty == cast(Ptr->getType())->getElementType()); setVolatile(isVolatile); @@ -1361,7 +1361,7 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile, unsigned Align, AtomicOrdering Order, - SynchronizationScope SynchScope, + unsigned SynchScope, BasicBlock *InsertAE) : UnaryInstruction(cast(Ptr->getType())->getElementType(), Load, Ptr, InsertAE) { @@ -1455,16 +1455,16 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align, Instruction *InsertBefore) : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic, - CrossThread, InsertBefore) {} + SynchScope::System, InsertBefore) {} StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align, BasicBlock *InsertAtEnd) : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic, - CrossThread, InsertAtEnd) {} + SynchScope::System, InsertAtEnd) {} StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align, AtomicOrdering Order, - SynchronizationScope SynchScope, + unsigned SynchScope, Instruction *InsertBefore) : Instruction(Type::getVoidTy(val->getContext()), Store, OperandTraits::op_begin(this), @@ -1480,7 +1480,7 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align, AtomicOrdering Order, - SynchronizationScope SynchScope, + unsigned SynchScope, BasicBlock *InsertAtEnd) : Instruction(Type::getVoidTy(val->getContext()), Store, OperandTraits::op_begin(this), @@ -1510,7 +1510,7 @@ void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, - SynchronizationScope SynchScope) { + unsigned SynchScope) { Op<0>() = Ptr; Op<1>() = Cmp; Op<2>() = NewVal; @@ -1543,7 +1543,7 @@ AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, - SynchronizationScope SynchScope, + unsigned SynchScope, 
Instruction *InsertBefore) : Instruction( StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext()), @@ -1556,7 +1556,7 @@ AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, - SynchronizationScope SynchScope, + unsigned SynchScope, BasicBlock *InsertAtEnd) : Instruction( StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext()), @@ -1572,7 +1572,7 @@ void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val, AtomicOrdering Ordering, - SynchronizationScope SynchScope) { + unsigned SynchScope) { Op<0>() = Ptr; Op<1>() = Val; setOperation(Operation); @@ -1592,7 +1592,7 @@ AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, AtomicOrdering Ordering, - SynchronizationScope SynchScope, + unsigned SynchScope, Instruction *InsertBefore) : Instruction(Val->getType(), AtomicRMW, OperandTraits::op_begin(this), @@ -1603,7 +1603,7 @@ AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, AtomicOrdering Ordering, - SynchronizationScope SynchScope, + unsigned SynchScope, BasicBlock *InsertAtEnd) : Instruction(Val->getType(), AtomicRMW, OperandTraits::op_begin(this), @@ -1617,7 +1617,7 @@ //===----------------------------------------------------------------------===// FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering, - SynchronizationScope SynchScope, + unsigned SynchScope, Instruction *InsertBefore) : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) { setOrdering(Ordering); @@ -1625,7 +1625,7 @@ } FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering, - SynchronizationScope SynchScope, + unsigned SynchScope, BasicBlock *InsertAtEnd) : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) { setOrdering(Ordering); Index: lib/IR/Verifier.cpp =================================================================== --- lib/IR/Verifier.cpp +++ lib/IR/Verifier.cpp @@ -2993,7 +2993,7 @@ ElTy, &LI); checkAtomicMemAccessSize(M, ElTy, &LI); } else { - Assert(LI.getSynchScope() == CrossThread, + Assert(LI.getSynchScope() == SynchScope::System, "Non-atomic load cannot have SynchronizationScope specified", &LI); } @@ -3022,7 +3022,7 @@ ElTy, &SI); checkAtomicMemAccessSize(M, ElTy, &SI); } else { - Assert(SI.getSynchScope() == CrossThread, + Assert(SI.getSynchScope() == SynchScope::System, "Non-atomic store cannot have SynchronizationScope specified", &SI); } visitInstruction(SI); Index: lib/Target/AMDGPU/AMDGPU.h =================================================================== --- lib/Target/AMDGPU/AMDGPU.h +++ lib/Target/AMDGPU/AMDGPU.h @@ -168,7 +168,33 @@ // Some places use this if the address space can't be determined. UNKNOWN_ADDRESS_SPACE = ~0u }; - } // namespace AMDGPUAS +/// \brief AMDGPU-specific synchronization scopes. Predefined synchronization +/// scopes are treated as follows: +/// +/// SynchScope::SingleThread - Synchronized with respect to the executing +/// work-item. +/// +/// SynchScope::System - Synchronized with respect to the entire system, which +/// includes all work-items on all kernel agents executing kernel dispatches +/// for the same application process, together with all agents executing the +/// same application process as the executing work-item. Only supported for +/// the global segment. 
+namespace AMDGPUSynchScope { +enum TargetSynchScope { + /// Synchronized with respect to the kernel agent, which includes all + /// work-items on the same kernel agent executing kernel dispatches for the + /// same application process as the executing work-item. Only supported for + /// the global segment. + KernelAgent = 2, + /// Synchronized with respect to the work-group, which includes all work-items + /// in the same work-group as the executing work-item. + WorkGroup = 3, + /// Synchronized with respect to the wave-front, which includes all work-items + /// in the same wavefront as the executing work-item. + WaveFront = 4 +}; +} // End AMDGPUSynchScope namespace + #endif Index: lib/Target/SystemZ/SystemZISelLowering.cpp =================================================================== --- lib/Target/SystemZ/SystemZISelLowering.cpp +++ lib/Target/SystemZ/SystemZISelLowering.cpp @@ -3150,13 +3150,13 @@ SDLoc DL(Op); AtomicOrdering FenceOrdering = static_cast( cast(Op.getOperand(1))->getZExtValue()); - SynchronizationScope FenceScope = static_cast( + unsigned FenceScope = static_cast( cast(Op.getOperand(2))->getZExtValue()); // The only fence that needs an instruction is a sequentially-consistent // cross-thread fence. if (FenceOrdering == AtomicOrdering::SequentiallyConsistent && - FenceScope == CrossThread) { + FenceScope == SynchScope::System) { return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other, Op.getOperand(0)), 0); Index: lib/Target/X86/X86ISelLowering.cpp =================================================================== --- lib/Target/X86/X86ISelLowering.cpp +++ lib/Target/X86/X86ISelLowering.cpp @@ -20704,7 +20704,7 @@ // otherwise, we might be able to be more aggressive on relaxed idempotent // rmw. In practice, they do not look useful, so we don't try to be // especially clever. - if (SynchScope == SingleThread) + if (SynchScope == SynchScope::SingleThread) // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at // the IR level, so we must wrap it in an intrinsic. return nullptr; @@ -20734,13 +20734,13 @@ SDLoc dl(Op); AtomicOrdering FenceOrdering = static_cast( cast(Op.getOperand(1))->getZExtValue()); - SynchronizationScope FenceScope = static_cast( + unsigned FenceScope = static_cast( cast(Op.getOperand(2))->getZExtValue()); // The only fence that needs an instruction is a sequentially-consistent // cross-thread fence. if (FenceOrdering == AtomicOrdering::SequentiallyConsistent && - FenceScope == CrossThread) { + FenceScope == SynchScope::System) { if (Subtarget.hasMFence()) return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); Index: lib/Transforms/Instrumentation/ThreadSanitizer.cpp =================================================================== --- lib/Transforms/Instrumentation/ThreadSanitizer.cpp +++ lib/Transforms/Instrumentation/ThreadSanitizer.cpp @@ -366,9 +366,9 @@ static bool isAtomic(Instruction *I) { if (LoadInst *LI = dyn_cast(I)) - return LI->isAtomic() && LI->getSynchScope() == CrossThread; + return LI->isAtomic() && LI->getSynchScope() == SynchScope::System; if (StoreInst *SI = dyn_cast(I)) - return SI->isAtomic() && SI->getSynchScope() == CrossThread; + return SI->isAtomic() && SI->getSynchScope() == SynchScope::System; if (isa(I)) return true; if (isa(I)) @@ -653,7 +653,7 @@ I->eraseFromParent(); } else if (FenceInst *FI = dyn_cast(I)) { Value *Args[] = {createOrdering(&IRB, FI->getOrdering())}; - Function *F = FI->getSynchScope() == SingleThread ? 
+    Function *F = FI->getSynchScope() == SynchScope::SingleThread ?
         TsanAtomicSignalFence : TsanAtomicThreadFence;
     CallInst *C = CallInst::Create(F, Args);
     ReplaceInstWithInst(I, C);
Index: test/Assembler/atomic.ll
===================================================================
--- test/Assembler/atomic.ll
+++ test/Assembler/atomic.ll
@@ -7,12 +7,18 @@
   load atomic i32, i32* %x unordered, align 4
   ; CHECK: load atomic volatile i32, i32* %x singlethread acquire, align 4
   load atomic volatile i32, i32* %x singlethread acquire, align 4
+  ; CHECK: load atomic volatile i32, i32* %x synchscope(3) acquire, align 4
+  load atomic volatile i32, i32* %x synchscope(3) acquire, align 4
   ; CHECK: store atomic i32 3, i32* %x release, align 4
   store atomic i32 3, i32* %x release, align 4
   ; CHECK: store atomic volatile i32 3, i32* %x singlethread monotonic, align 4
   store atomic volatile i32 3, i32* %x singlethread monotonic, align 4
+  ; CHECK: store atomic volatile i32 3, i32* %x synchscope(3) monotonic, align 4
+  store atomic volatile i32 3, i32* %x synchscope(3) monotonic, align 4
   ; CHECK: cmpxchg i32* %x, i32 1, i32 0 singlethread monotonic monotonic
   cmpxchg i32* %x, i32 1, i32 0 singlethread monotonic monotonic
+  ; CHECK: cmpxchg i32* %x, i32 1, i32 0 synchscope(3) monotonic monotonic
+  cmpxchg i32* %x, i32 1, i32 0 synchscope(3) monotonic monotonic
   ; CHECK: cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
   cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
   ; CHECK: cmpxchg i32* %x, i32 42, i32 0 acq_rel monotonic
@@ -23,6 +29,8 @@
   atomicrmw add i32* %x, i32 10 seq_cst
   ; CHECK: atomicrmw volatile xchg i32* %x, i32 10 monotonic
   atomicrmw volatile xchg i32* %x, i32 10 monotonic
+  ; CHECK: fence synchscope(3) release
+  fence synchscope(3) release
   ; CHECK: fence singlethread release
   fence singlethread release
   ; CHECK: fence seq_cst
Index: unittests/Analysis/AliasAnalysisTest.cpp
===================================================================
--- unittests/Analysis/AliasAnalysisTest.cpp
+++ unittests/Analysis/AliasAnalysisTest.cpp
@@ -180,10 +180,11 @@
   auto *VAArg1 = new VAArgInst(Addr, PtrType, "vaarg", BB);
   auto *CmpXChg1 = new AtomicCmpXchgInst(
       Addr, ConstantInt::get(IntType, 0), ConstantInt::get(IntType, 1),
-      AtomicOrdering::Monotonic, AtomicOrdering::Monotonic, CrossThread, BB);
+      AtomicOrdering::Monotonic, AtomicOrdering::Monotonic, SynchScope::System,
+      BB);
   auto *AtomicRMW =
       new AtomicRMWInst(AtomicRMWInst::Xchg, Addr, ConstantInt::get(IntType, 1),
-                        AtomicOrdering::Monotonic, CrossThread, BB);
+                        AtomicOrdering::Monotonic, SynchScope::System, BB);

   ReturnInst::Create(C, nullptr, BB);
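
For reference, a minimal sketch of how the widened unsigned scope API above is used from C++. It assumes only the headers modified in this patch (llvm/IR/IRBuilder.h, llvm/IR/Instructions.h); the function emitScopedAtomics and the raw value 3u (which matches AMDGPUSynchScope::WorkGroup as defined in lib/Target/AMDGPU/AMDGPU.h) are illustrative, not part of the change.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Emits one atomic operation per kind of scope: the default system scope,
// the predefined single-thread scope, and a target-specific scope value.
static void emitScopedAtomics(IRBuilder<> &Builder, Value *Ptr, Value *Val) {
  // Default: SynchScope::System, printed with no scope token in textual IR.
  Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, Ptr, Val,
                          AtomicOrdering::Monotonic, SynchScope::System);

  // Predefined scope, printed as "singlethread".
  Builder.CreateFence(AtomicOrdering::SequentiallyConsistent,
                      SynchScope::SingleThread);

  // Target-specific scope, printed as "synchscope(3)" by AssemblyWriter.
  Builder.CreateFence(AtomicOrdering::Release, /*SynchScope=*/3u);
}

Existing callers need only the mechanical substitution already shown at the updated call sites: CrossThread becomes SynchScope::System and SingleThread becomes SynchScope::SingleThread.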
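
The +1/-1 mapping in getEncodedSynchScope()/getDecodedSynchScope() keeps the bitcode values of the two predefined scopes identical to the removed SYNCHSCOPE_SINGLETHREAD/SYNCHSCOPE_CROSSTHREAD constants, because SingleThread is ~0U and wraps to 0 when incremented, while target-specific scopes round-trip unchanged. A standalone sketch of that arithmetic (helper names are hypothetical, not the functions in the patch):

// Mirrors SynchScope::PredefinedSynchScope and the bitcode mapping above.
enum PredefinedSynchScope : unsigned { SingleThread = ~0U, System = 0 };

constexpr unsigned encodeSynchScope(unsigned SynchScope) { return SynchScope + 1; }
constexpr unsigned decodeSynchScope(unsigned Val) { return Val - 1; }

static_assert(encodeSynchScope(SingleThread) == 0,
              "same value as the removed SYNCHSCOPE_SINGLETHREAD");
static_assert(encodeSynchScope(System) == 1,
              "same value as the removed SYNCHSCOPE_CROSSTHREAD");
static_assert(decodeSynchScope(encodeSynchScope(3u)) == 3u,
              "target-specific scopes round-trip unchanged");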