Index: llvm/include/llvm/CodeGen/TargetLowering.h =================================================================== --- llvm/include/llvm/CodeGen/TargetLowering.h +++ llvm/include/llvm/CodeGen/TargetLowering.h @@ -256,6 +256,7 @@ MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop. BitTestIntrinsic, // Use a target-specific intrinsic for special bit // operations; used by X86. + Expand, // Generic expansion in terms of other atomic operations. }; /// Enum that specifies when a multiplication should be expanded. @@ -2020,12 +2021,6 @@ // be unnecessarily held, except if clrex, inserted by this hook, is executed. virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const {} - /// Returns true if the given (atomic) store should be expanded by the - /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input. - virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const { - return false; - } - /// Returns true if arguments should be sign-extended in lib calls. virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const { return IsSigned; } @@ -2042,6 +2037,13 @@ return AtomicExpansionKind::None; } + /// Returns how the given (atomic) store should be expanded by the IR-level + /// AtomicExpand pass. For instance AtomicExpansionKind::Expand will try + /// to use an atomicrmw xchg. + virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const { + return AtomicExpansionKind::None; + } + /// Returns how the given atomic cmpxchg should be expanded by the IR-level /// AtomicExpand pass. 
virtual AtomicExpansionKind Index: llvm/lib/CodeGen/AtomicExpandPass.cpp =================================================================== --- llvm/lib/CodeGen/AtomicExpandPass.cpp +++ llvm/lib/CodeGen/AtomicExpandPass.cpp @@ -77,6 +77,7 @@ bool expandAtomicLoadToLL(LoadInst *LI); bool expandAtomicLoadToCmpXchg(LoadInst *LI); StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI); + bool tryExpandAtomicStore(StoreInst *SI); void expandAtomicStore(StoreInst *SI); bool tryExpandAtomicRMW(AtomicRMWInst *AI); AtomicRMWInst *convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI); @@ -271,10 +272,8 @@ MadeChange = true; } - if (TLI->shouldExpandAtomicStoreInIR(SI)) { - expandAtomicStore(SI); + if (tryExpandAtomicStore(SI)) MadeChange = true; - } } else if (RMWI) { // There are two different ways of expanding RMW instructions: // - into a load if it is idempotent @@ -418,6 +417,18 @@ } } +bool AtomicExpand::tryExpandAtomicStore(StoreInst *SI) { + switch (TLI->shouldExpandAtomicStoreInIR(SI)) { + case TargetLoweringBase::AtomicExpansionKind::None: + return false; + case TargetLoweringBase::AtomicExpansionKind::Expand: + expandAtomicStore(SI); + return true; + default: + llvm_unreachable("Unhandled case in tryExpandAtomicStore"); + } +} + bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) { IRBuilder<> Builder(LI); Index: llvm/lib/Target/AArch64/AArch64ISelLowering.h =================================================================== --- llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -673,7 +673,8 @@ TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override; - bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override; + TargetLoweringBase::AtomicExpansionKind + shouldExpandAtomicStoreInIR(StoreInst *SI) const override; TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override; Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp 
=================================================================== --- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -19369,12 +19369,12 @@ // Loads and stores less than 128-bits are already atomic; ones above that // are doomed anyway, so defer to the default libcall and blame the OS when // things go wrong. -bool AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { +TargetLoweringBase::AtomicExpansionKind +AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits(); - if (Size != 128) - return false; - - return !isOpSuitableForLDPSTP(SI); + if (Size != 128 || isOpSuitableForLDPSTP(SI)) + return AtomicExpansionKind::None; + return AtomicExpansionKind::Expand; } // Loads and stores less than 128-bits are already atomic; ones above that Index: llvm/lib/Target/ARM/ARMISelLowering.h =================================================================== --- llvm/lib/Target/ARM/ARMISelLowering.h +++ llvm/lib/Target/ARM/ARMISelLowering.h @@ -665,7 +665,8 @@ bool shouldInsertFencesForAtomic(const Instruction *I) const override; TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override; - bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override; + TargetLoweringBase::AtomicExpansionKind + shouldExpandAtomicStoreInIR(StoreInst *SI) const override; TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override; TargetLoweringBase::AtomicExpansionKind Index: llvm/lib/Target/ARM/ARMISelLowering.cpp =================================================================== --- llvm/lib/Target/ARM/ARMISelLowering.cpp +++ llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -20949,7 +20949,8 @@ // are doomed anyway, so defer to the default libcall and blame the OS when // things go wrong. 
Cortex M doesn't have ldrexd/strexd though, so don't emit // anything for those. -bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { +TargetLoweringBase::AtomicExpansionKind +ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { bool has64BitAtomicStore; if (Subtarget->isMClass()) has64BitAtomicStore = false; @@ -20959,7 +20960,8 @@ has64BitAtomicStore = Subtarget->hasV6Ops(); unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits(); - return Size == 64 && has64BitAtomicStore; + return Size == 64 && has64BitAtomicStore ? AtomicExpansionKind::Expand + : AtomicExpansionKind::None; } // Loads and stores less than 64-bits are already atomic; ones above that Index: llvm/lib/Target/Hexagon/HexagonISelLowering.h =================================================================== --- llvm/lib/Target/Hexagon/HexagonISelLowering.h +++ llvm/lib/Target/Hexagon/HexagonISelLowering.h @@ -328,7 +328,7 @@ Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override; AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override; - bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override; + AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override; AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override; Index: llvm/lib/Target/Hexagon/HexagonISelLowering.cpp =================================================================== --- llvm/lib/Target/Hexagon/HexagonISelLowering.cpp +++ llvm/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -3666,9 +3666,12 @@ : AtomicExpansionKind::None; } -bool HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { +TargetLowering::AtomicExpansionKind +HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { // Do not expand loads and stores that don't exceed 64 bits. 
- return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64; + return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64 + ? AtomicExpansionKind::Expand + : AtomicExpansionKind::None; } TargetLowering::AtomicExpansionKind Index: llvm/lib/Target/X86/X86ISelLowering.h =================================================================== --- llvm/lib/Target/X86/X86ISelLowering.h +++ llvm/lib/Target/X86/X86ISelLowering.h @@ -1621,7 +1621,8 @@ TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override; - bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override; + TargetLoweringBase::AtomicExpansionKind + shouldExpandAtomicStoreInIR(StoreInst *SI) const override; TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override; TargetLoweringBase::AtomicExpansionKind Index: llvm/lib/Target/X86/X86ISelLowering.cpp =================================================================== --- llvm/lib/Target/X86/X86ISelLowering.cpp +++ llvm/lib/Target/X86/X86ISelLowering.cpp @@ -30421,7 +30421,8 @@ return false; } -bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { +TargetLoweringBase::AtomicExpansionKind +X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { Type *MemType = SI->getValueOperand()->getType(); bool NoImplicitFloatOps = @@ -30429,9 +30430,10 @@ if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() && !Subtarget.useSoftFloat() && !NoImplicitFloatOps && (Subtarget.hasSSE1() || Subtarget.hasX87())) - return false; + return AtomicExpansionKind::None; - return needsCmpXchgNb(MemType); + return needsCmpXchgNb(MemType) ? AtomicExpansionKind::Expand + : AtomicExpansionKind::None; } // Note: this turns large loads into lock cmpxchg8b/16b.