diff --git a/llvm/include/llvm/CodeGen/MachineMemOperand.h b/llvm/include/llvm/CodeGen/MachineMemOperand.h
--- a/llvm/include/llvm/CodeGen/MachineMemOperand.h
+++ b/llvm/include/llvm/CodeGen/MachineMemOperand.h
@@ -245,7 +245,7 @@
   /// Return the atomic ordering requirements for this memory operation. For
   /// cmpxchg atomic operations, return the atomic ordering requirements when
   /// store occurs.
-  AtomicOrdering getOrdering() const {
+  AtomicOrdering getSuccessOrdering() const {
     return static_cast<AtomicOrdering>(AtomicInfo.Ordering);
   }
 
@@ -257,9 +257,9 @@
 
   /// Return a single atomic ordering that is at least as strong as both the
   /// success and failure orderings for an atomic operation. (For operations
-  /// other than cmpxchg, this is equivalent to getOrdering().)
+  /// other than cmpxchg, this is equivalent to getSuccessOrdering().)
   AtomicOrdering getMergedOrdering() const {
-    AtomicOrdering Ordering = getOrdering();
+    AtomicOrdering Ordering = getSuccessOrdering();
     AtomicOrdering FailureOrdering = getFailureOrdering();
     if (FailureOrdering == AtomicOrdering::SequentiallyConsistent)
       return AtomicOrdering::SequentiallyConsistent;
@@ -281,14 +281,16 @@
 
   /// Returns true if this operation has an atomic ordering requirement of
   /// unordered or higher, false otherwise.
-  bool isAtomic() const { return getOrdering() != AtomicOrdering::NotAtomic; }
+  bool isAtomic() const {
+    return getSuccessOrdering() != AtomicOrdering::NotAtomic;
+  }
 
   /// Returns true if this memory operation doesn't have any ordering
   /// constraints other than normal aliasing. Volatile and (ordered) atomic
   /// memory operations can't be reordered.
   bool isUnordered() const {
-    return (getOrdering() == AtomicOrdering::NotAtomic ||
-            getOrdering() == AtomicOrdering::Unordered) &&
+    return (getSuccessOrdering() == AtomicOrdering::NotAtomic ||
+            getSuccessOrdering() == AtomicOrdering::Unordered) &&
            !isVolatile();
   }
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -1304,11 +1304,13 @@
   /// Return the atomic ordering requirements for this memory operation. For
   /// cmpxchg atomic operations, return the atomic ordering requirements when
   /// store occurs.
-  AtomicOrdering getOrdering() const { return MMO->getOrdering(); }
+  AtomicOrdering getSuccessOrdering() const {
+    return MMO->getSuccessOrdering();
+  }
 
   /// Return a single atomic ordering that is at least as strong as both the
   /// success and failure orderings for an atomic operation. (For operations
-  /// other than cmpxchg, this is equivalent to getOrdering().)
+  /// other than cmpxchg, this is equivalent to getSuccessOrdering().)
   AtomicOrdering getMergedOrdering() const { return MMO->getMergedOrdering(); }
 
   /// Return true if the memory operation ordering is Unordered or higher.
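Note (not part of the patch): only the accessor name changes above; the stored `AtomicInfo` is untouched. A minimal sketch of how client code might combine the renamed accessor with `getMergedOrdering()` follows; `mayNeedFence` is a hypothetical helper name, not an LLVM function.

```cpp
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/Support/AtomicOrdering.h"

using namespace llvm;

// Hypothetical helper: does this operand impose ordering that a target may
// have to enforce with explicit synchronization?
static bool mayNeedFence(const MachineMemOperand &MMO) {
  // isUnordered() is true for non-volatile NotAtomic/Unordered accesses.
  if (MMO.isUnordered())
    return false;
  // For cmpxchg operands, getMergedOrdering() yields a single ordering at
  // least as strong as both the success and failure orderings.
  return isStrongerThanMonotonic(MMO.getMergedOrdering());
}
```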
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -491,7 +491,7 @@
   LegalityQuery::MemDesc MMDesc;
   MMDesc.SizeInBits = MMO.getSizeInBits();
   MMDesc.AlignInBits = MMO.getAlign().value() * 8;
-  MMDesc.Ordering = MMO.getOrdering();
+  MMDesc.Ordering = MMO.getSuccessOrdering();
   LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg());
   LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
   if (LI->getAction({MI.getOpcode(), {UseTy, SrcTy}, {MMDesc}}).Action !=
@@ -3661,7 +3661,7 @@
   LegalityQuery::MemDesc MMDesc;
   MMDesc.SizeInBits = WideMemSizeInBits;
   MMDesc.AlignInBits = MMO.getAlign().value() * 8;
-  MMDesc.Ordering = MMO.getOrdering();
+  MMDesc.Ordering = MMO.getSuccessOrdering();
   if (!isLegalOrBeforeLegalizer(
           {TargetOpcode::G_LOAD, {Ty, MRI.getType(Ptr)}, {MMDesc}}))
     return false;
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -3846,8 +3846,7 @@
 
   // This implementation doesn't work for atomics. Give up instead of doing
   // something invalid.
-  if (MMO->getOrdering() != AtomicOrdering::NotAtomic ||
-      MMO->getFailureOrdering() != AtomicOrdering::NotAtomic)
+  if (MMO->isAtomic())
     return UnableToLegalize;
 
   bool IsLoad = MI.getOpcode() == TargetOpcode::G_LOAD;
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
@@ -352,8 +352,8 @@
 
   SmallVector<LegalityQuery::MemDesc, 2> MemDescrs;
   for (const auto &MMO : MI.memoperands())
-    MemDescrs.push_back({MMO->getSizeInBits(),
-                         8 * MMO->getAlign().value(), MMO->getOrdering()});
+    MemDescrs.push_back({MMO->getSizeInBits(), 8 * MMO->getAlign().value(),
+                         MMO->getSuccessOrdering()});
 
   return getAction({MI.getOpcode(), Types, MemDescrs});
 }
diff --git a/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp b/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
--- a/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
+++ b/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
@@ -125,7 +125,7 @@
     MIOperands.push_back((unsigned)Op->getSize());
     MIOperands.push_back((unsigned)Op->getFlags());
     MIOperands.push_back((unsigned)Op->getOffset());
-    MIOperands.push_back((unsigned)Op->getOrdering());
+    MIOperands.push_back((unsigned)Op->getSuccessOrdering());
     MIOperands.push_back((unsigned)Op->getAddrSpace());
     MIOperands.push_back((unsigned)Op->getSyncScopeID());
     MIOperands.push_back((unsigned)Op->getBaseAlign().value());
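For context (not from the patch): the `MemDesc::Ordering` field populated in the hunks above is what legality rules and combine guards compare against. A sketch of such a guard, assuming a combine that must not widen atomic accesses; `canWidenMemOp` is a hypothetical name.

```cpp
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/Support/AtomicOrdering.h"

using namespace llvm;

// Hypothetical predicate mirroring the guards above: a memory operation may
// only be widened when its MMO carries no atomic ordering at all, since
// widening an atomic access would change the program's memory semantics.
static bool canWidenMemOp(const LegalityQuery::MemDesc &Desc,
                          uint64_t WideSizeInBits) {
  if (Desc.Ordering != AtomicOrdering::NotAtomic)
    return false;
  // Widening must not shrink the access.
  return WideSizeInBits >= Desc.SizeInBits;
}
```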
diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp
--- a/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/llvm/lib/CodeGen/MachineFunction.cpp
@@ -439,9 +439,10 @@
 MachineMemOperand *MachineFunction::getMachineMemOperand(
     const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo,
     uint64_t Size) {
-  return new (Allocator) MachineMemOperand(
-      PtrInfo, MMO->getFlags(), Size, MMO->getBaseAlign(), AAMDNodes(), nullptr,
-      MMO->getSyncScopeID(), MMO->getOrdering(), MMO->getFailureOrdering());
+  return new (Allocator)
+      MachineMemOperand(PtrInfo, MMO->getFlags(), Size, MMO->getBaseAlign(),
+                        AAMDNodes(), nullptr, MMO->getSyncScopeID(),
+                        MMO->getSuccessOrdering(), MMO->getFailureOrdering());
 }
 
 MachineMemOperand *
@@ -457,10 +458,10 @@
 
   // Do not preserve ranges, since we don't necessarily know what the high bits
   // are anymore.
-  return new (Allocator)
-      MachineMemOperand(PtrInfo.getWithOffset(Offset), MMO->getFlags(), Size,
-                        Alignment, MMO->getAAInfo(), nullptr,
-                        MMO->getSyncScopeID(), MMO->getOrdering(),
-                        MMO->getFailureOrdering());
+  return new (Allocator) MachineMemOperand(
+      PtrInfo.getWithOffset(Offset), MMO->getFlags(), Size, Alignment,
+      MMO->getAAInfo(), nullptr, MMO->getSyncScopeID(),
+      MMO->getSuccessOrdering(), MMO->getFailureOrdering());
 }
 
 MachineMemOperand *
@@ -472,7 +473,7 @@
 
   return new (Allocator) MachineMemOperand(
       MPI, MMO->getFlags(), MMO->getSize(), MMO->getBaseAlign(), AAInfo,
-      MMO->getRanges(), MMO->getSyncScopeID(), MMO->getOrdering(),
+      MMO->getRanges(), MMO->getSyncScopeID(), MMO->getSuccessOrdering(),
       MMO->getFailureOrdering());
 }
 
@@ -482,7 +483,7 @@
   return new (Allocator) MachineMemOperand(
       MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlign(),
       MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
-      MMO->getOrdering(), MMO->getFailureOrdering());
+      MMO->getSuccessOrdering(), MMO->getFailureOrdering());
 }
 
 MachineInstr::ExtraInfo *MachineFunction::createMIExtraInfo(
diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp
--- a/llvm/lib/CodeGen/MachineOperand.cpp
+++ b/llvm/lib/CodeGen/MachineOperand.cpp
@@ -1038,7 +1038,7 @@
   AtomicInfo.SSID = static_cast<unsigned>(SSID);
   assert(getSyncScopeID() == SSID && "Value truncated");
   AtomicInfo.Ordering = static_cast<unsigned>(Ordering);
-  assert(getOrdering() == Ordering && "Value truncated");
+  assert(getSuccessOrdering() == Ordering && "Value truncated");
   AtomicInfo.FailureOrdering = static_cast<unsigned>(FailureOrdering);
   assert(getFailureOrdering() == FailureOrdering && "Value truncated");
 }
@@ -1107,8 +1107,8 @@
 
   printSyncScope(OS, Context, getSyncScopeID(), SSNs);
 
-  if (getOrdering() != AtomicOrdering::NotAtomic)
-    OS << toIRString(getOrdering()) << ' ';
+  if (getSuccessOrdering() != AtomicOrdering::NotAtomic)
+    OS << toIRString(getSuccessOrdering()) << ' ';
   if (getFailureOrdering() != AtomicOrdering::NotAtomic)
     OS << toIRString(getFailureOrdering()) << ' ';
diff --git a/llvm/lib/CodeGen/MachineStableHash.cpp b/llvm/lib/CodeGen/MachineStableHash.cpp
--- a/llvm/lib/CodeGen/MachineStableHash.cpp
+++ b/llvm/lib/CodeGen/MachineStableHash.cpp
@@ -182,7 +182,7 @@
       HashComponents.push_back(static_cast<unsigned>(Op->getSize()));
       HashComponents.push_back(static_cast<unsigned>(Op->getFlags()));
       HashComponents.push_back(static_cast<unsigned>(Op->getOffset()));
-      HashComponents.push_back(static_cast<unsigned>(Op->getOrdering()));
+      HashComponents.push_back(static_cast<unsigned>(Op->getSuccessOrdering()));
       HashComponents.push_back(static_cast<unsigned>(Op->getAddrSpace()));
      HashComponents.push_back(static_cast<unsigned>(Op->getSyncScopeID()));
       HashComponents.push_back(static_cast<unsigned>(Op->getBaseAlign().value()));
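Aside (not from the patch): the `MachineFunction` helpers above all follow one pattern, in that every derived MMO re-supplies both orderings so cmpxchg failure information survives cloning. A small usage sketch of the flags-cloning overload shown in the last hunk; the caller is hypothetical.

```cpp
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"

using namespace llvm;

// Hypothetical pass snippet: derive a volatile copy of an existing operand.
// The (MMO, Flags) overload preserves the success and failure orderings
// internally, exactly as the hunk above implements it.
static MachineMemOperand *makeVolatileCopy(MachineFunction &MF,
                                           const MachineMemOperand *MMO) {
  return MF.getMachineMemOperand(
      MMO, MMO->getFlags() | MachineMemOperand::MOVolatile);
}
```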
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -819,7 +819,7 @@
 
     // ldar and stlr have much more restrictive addressing modes (just a
     // register).
-    if (isStrongerThanMonotonic(cast<MemSDNode>(Use)->getOrdering()))
+    if (isStrongerThanMonotonic(cast<MemSDNode>(Use)->getSuccessOrdering()))
       return false;
   }
 
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -2652,7 +2652,7 @@
     auto &MemOp = **I.memoperands_begin();
     uint64_t MemSizeInBytes = MemOp.getSize();
     unsigned MemSizeInBits = MemSizeInBytes * 8;
-    AtomicOrdering Order = MemOp.getOrdering();
+    AtomicOrdering Order = MemOp.getSuccessOrdering();
 
     // Need special instructions for atomics that affect ordering.
     if (Order != AtomicOrdering::NotAtomic &&
diff --git a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
--- a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
@@ -634,7 +634,7 @@
     IsVolatile |= MMO->isVolatile();
     InstrAddrSpace |=
         toSIAtomicAddrSpace(MMO->getPointerInfo().getAddrSpace());
-    AtomicOrdering OpOrdering = MMO->getOrdering();
+    AtomicOrdering OpOrdering = MMO->getSuccessOrdering();
     if (OpOrdering != AtomicOrdering::NotAtomic) {
       const auto &IsSyncScopeInclusion =
           MMI->isSyncScopeInclusion(SSID, MMO->getSyncScopeID());
@@ -645,9 +645,9 @@
       }
 
       SSID = IsSyncScopeInclusion.getValue() ? SSID : MMO->getSyncScopeID();
-      Ordering =
-          isStrongerThan(Ordering, OpOrdering) ?
-              Ordering : MMO->getOrdering();
+      Ordering = isStrongerThan(Ordering, OpOrdering)
+                     ? Ordering
+                     : MMO->getSuccessOrdering();
       assert(MMO->getFailureOrdering() != AtomicOrdering::Release &&
              MMO->getFailureOrdering() != AtomicOrdering::AcquireRelease);
       FailureOrdering =
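The SIMemoryLegalizer hunk above folds per-operand orderings into a single strongest value. A standalone restatement of that folding loop, illustrative only and assuming the post-rename API:

```cpp
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/Support/AtomicOrdering.h"

using namespace llvm;

// Reduce all memoperands of an instruction to the strongest success
// ordering seen, starting from NotAtomic (the weakest value).
static AtomicOrdering strongestSuccessOrdering(const MachineInstr &MI) {
  AtomicOrdering Strongest = AtomicOrdering::NotAtomic;
  for (const MachineMemOperand *MMO : MI.memoperands())
    if (isStrongerThan(MMO->getSuccessOrdering(), Strongest))
      Strongest = MMO->getSuccessOrdering();
  return Strongest;
}
```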
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -9844,7 +9844,7 @@
 }
 
 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
-  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
+  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering()))
     // Acquire/Release load/store is not legal for targets without a dmb or
     // equivalent available.
     return SDValue();
diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.td b/llvm/lib/Target/ARM/ARMInstrInfo.td
--- a/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -5283,7 +5283,7 @@
 class acquiring_load<PatFrag base>
   : PatFrag<(ops node:$ptr), (base node:$ptr), [{
-  AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
+  AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getSuccessOrdering();
   return isAcquireOrStronger(Ordering);
 }]>;
 
@@ -5293,7 +5293,7 @@
 class releasing_store<PatFrag base>
   : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
-  AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
+  AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getSuccessOrdering();
   return isReleaseOrStronger(Ordering);
 }]>;
diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
--- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -1552,7 +1552,7 @@
         auto *NewMMO = MF.getMachineMemOperand(
             MMO->getPointerInfo(), MMO->getFlags(), MMO->getSize(),
             MFI.getObjectAlign(FI), MMO->getAAInfo(), MMO->getRanges(),
-            MMO->getSyncScopeID(), MMO->getOrdering(),
+            MMO->getSyncScopeID(), MMO->getSuccessOrdering(),
             MMO->getFailureOrdering());
         new_memops.push_back(NewMMO);
         KeepOld = false;
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -3017,7 +3017,7 @@
     WideMMO = MF.getMachineMemOperand(
         MMO->getPointerInfo(), MMO->getFlags(), 2 * LoadLen, Align(LoadLen),
         MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
-        MMO->getOrdering(), MMO->getFailureOrdering());
+        MMO->getSuccessOrdering(), MMO->getFailureOrdering());
   }
 
   SDValue Load0 = DAG.getLoad(LoadTy, dl, Chain, Base0, WideMMO);
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -850,7 +850,7 @@
   if (!LoadedVT.isSimple())
     return false;
 
-  AtomicOrdering Ordering = LD->getOrdering();
+  AtomicOrdering Ordering = LD->getSuccessOrdering();
   // In order to lower atomic loads with stronger guarantees we would need to
   // use load.acquire or insert fences. However these features were only added
   // with PTX ISA 6.0 / sm_70.
@@ -1717,7 +1717,7 @@
   if (!StoreVT.isSimple())
     return false;
 
-  AtomicOrdering Ordering = ST->getOrdering();
+  AtomicOrdering Ordering = ST->getSuccessOrdering();
   // In order to lower atomic loads with stronger guarantees we would need to
   // use store.release or insert fences. However these features were only added
   // with PTX ISA 6.0 / sm_70.
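The ARMInstrInfo.td predicates above run as C++ inside the pattern matcher; restated standalone for clarity (a sketch, not code from the patch):

```cpp
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/Support/AtomicOrdering.h"

using namespace llvm;

// An "acquiring" load has a success ordering of acquire or stronger; a
// "releasing" store has release or stronger. Weaker accesses fall through
// to the ordinary load/store patterns.
static bool isAcquiringLoad(const AtomicSDNode *N) {
  return isAcquireOrStronger(N->getSuccessOrdering());
}
static bool isReleasingStore(const AtomicSDNode *N) {
  return isReleaseOrStronger(N->getSuccessOrdering());
}
```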
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -2988,9 +2988,10 @@
 }
 
 static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
-  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
-    // Expand with a fence.
-    return SDValue();
+  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
+    // Expand with a fence.
+    return SDValue();
+  }
 
   // Monotonic load/stores are legal.
   return Op;
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -3932,7 +3932,7 @@
                               Node->getMemOperand());
   // We have to enforce sequential consistency by performing a
   // serialization operation after the store.
-  if (Node->getOrdering() == AtomicOrdering::SequentiallyConsistent)
+  if (Node->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent)
     Chain = SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op),
                                        MVT::Other, Chain), 0);
   return Chain;
@@ -5565,7 +5565,7 @@
                             DL, Tys, Ops, MVT::i128, MMO);
   // We have to enforce sequential consistency by performing a
   // serialization operation after the store.
-  if (cast<AtomicSDNode>(N)->getOrdering() ==
+  if (cast<AtomicSDNode>(N)->getSuccessOrdering() ==
       AtomicOrdering::SequentiallyConsistent)
     Res = SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other, Res),
                   0);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -29725,7 +29725,7 @@
   // during codegen and then dropped. Note that we expect (but don't assume),
   // that orderings other than seq_cst and acq_rel have been canonicalized to
   // a store or load.
-  if (AN->getOrdering() == AtomicOrdering::SequentiallyConsistent &&
+  if (AN->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent &&
      AN->getSyncScopeID() == SyncScope::System) {
     // Prefer a locked operation against a stack location to minimize cache
     // traffic. This assumes that stack locations are very likely to be
@@ -29758,7 +29758,8 @@
   SDLoc dl(Node);
   EVT VT = Node->getMemoryVT();
 
-  bool IsSeqCst = Node->getOrdering() == AtomicOrdering::SequentiallyConsistent;
+  bool IsSeqCst =
+      Node->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent;
   bool IsTypeLegal = DAG.getTargetLoweringInfo().isTypeLegal(VT);
 
   // If this store is not sequentially consistent and the type is legal
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -937,8 +937,8 @@
 LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
   AtomicSDNode *N = cast<AtomicSDNode>(Op);
   assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
-  assert((N->getOrdering() == AtomicOrdering::Unordered ||
-          N->getOrdering() == AtomicOrdering::Monotonic) &&
+  assert((N->getSuccessOrdering() == AtomicOrdering::Unordered ||
+          N->getSuccessOrdering() == AtomicOrdering::Monotonic) &&
          "setInsertFencesForAtomic(true) expects unordered / monotonic");
   if (N->getMemoryVT() == MVT::i32) {
     if (N->getAlignment() < 4)
@@ -968,8 +968,8 @@
 LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
   AtomicSDNode *N = cast<AtomicSDNode>(Op);
   assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
-  assert((N->getOrdering() == AtomicOrdering::Unordered ||
-          N->getOrdering() == AtomicOrdering::Monotonic) &&
+  assert((N->getSuccessOrdering() == AtomicOrdering::Unordered ||
+          N->getSuccessOrdering() == AtomicOrdering::Monotonic) &&
          "setInsertFencesForAtomic(true) expects unordered / monotonic");
   if (N->getMemoryVT() == MVT::i32) {
     if (N->getAlignment() < 4)
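Closing note (not part of the patch): across the Sparc, SystemZ, X86, and XCore hunks the recurring pattern is the same; only the success ordering drives lowering decisions for atomic loads and stores. A condensed sketch of the Sparc-style hook, mirroring LowerATOMIC_LOAD_STORE above:

```cpp
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/Support/AtomicOrdering.h"

using namespace llvm;

// Monotonic-or-weaker atomic loads/stores are legal as-is; anything
// stronger is rejected so generic code expands it with fences.
static SDValue lowerAtomicLoadStoreSketch(SDValue Op) {
  auto *N = cast<AtomicSDNode>(Op);
  if (isStrongerThanMonotonic(N->getSuccessOrdering()))
    return SDValue(); // signal "expand": fences get inserted around the op
  return Op;          // monotonic load/store maps directly to the ISA
}
```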