Index: include/llvm/Target/TargetLowering.h
===================================================================
--- include/llvm/Target/TargetLowering.h
+++ include/llvm/Target/TargetLowering.h
@@ -123,11 +123,16 @@
     // mask (ex: x86 blends).
   };
 
-  /// Enum that specifies what a AtomicRMWInst is expanded to, if at all.
+  /// Enum that specifies what an AtomicRMWInst is expanded to, if at all.
+  /// Exists because different targets have different levels of support for
+  /// these atomic RMW instructions, and also have different options w.r.t.
+  /// what they should expand to.
   enum class AtomicRMWExpansionKind {
-    Native,  // Don't expand the instruction.
-    LLSC,    // Expand the instruction into loadlinked/storeconditional.
-    CmpXChg, // Expand the instruction into cmpxchg.
+    None,    // Don't expand the instruction.
+    LLSC,    // Expand the instruction into load-linked/store-conditional; used
+             // by ARM/AArch64. Implies `hasLoadLinkedStoreConditional`
+             // returns true.
+    CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
   };
 
   static ISD::NodeType getExtendForContent(BooleanContent Content) {
@@ -1081,7 +1086,7 @@
   /// AtomicRMW, if at all. Default is to never expand.
   virtual AtomicRMWExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMWI) const {
     (void)RMWI;
-    return AtomicRMWExpansionKind::Native;
+    return AtomicRMWExpansionKind::None;
   }
 
   /// On some platforms, an AtomicRMW that never actually modifies the value
Index: lib/CodeGen/AtomicExpandPass.cpp
===================================================================
--- lib/CodeGen/AtomicExpandPass.cpp
+++ lib/CodeGen/AtomicExpandPass.cpp
@@ -48,7 +48,7 @@
     bool expandAtomicLoadToLL(LoadInst *LI);
     bool expandAtomicLoadToCmpXchg(LoadInst *LI);
     bool expandAtomicStore(StoreInst *SI);
-    bool expandAtomicRMW(AtomicRMWInst *AI);
+    bool tryExpandAtomicRMW(AtomicRMWInst *AI);
     bool expandAtomicRMWToLLSC(AtomicRMWInst *AI);
     bool expandAtomicRMWToCmpXchg(AtomicRMWInst *AI);
     bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
@@ -138,9 +138,9 @@
 
       if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
         MadeChange = true;
-        continue;
+      } else {
+        MadeChange |= tryExpandAtomicRMW(RMWI);
       }
-      MadeChange |= expandAtomicRMW(RMWI);
     } else if (CASI && TLI->hasLoadLinkedStoreConditional()) {
       MadeChange |= expandAtomicCmpXchg(CASI);
     }
@@ -214,7 +214,7 @@
   // atomic if implemented as a native store. So we replace them by an
   // atomic swap, that can be implemented for example as a ldrex/strex on ARM
   // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
-  // It is the responsibility of the target to only return true in
+  // It is the responsibility of the target to only signal expansion via
   // shouldExpandAtomicRMW in cases where this is required and possible.
   IRBuilder<> Builder(SI);
   AtomicRMWInst *AI =
@@ -223,24 +223,24 @@
   SI->eraseFromParent();
 
   // Now we have an appropriate swap instruction, lower it as usual.
-  return expandAtomicRMW(AI);
+  return tryExpandAtomicRMW(AI);
 }
 
-bool AtomicExpand::expandAtomicRMW(AtomicRMWInst *AI) {
-  const auto How = TLI->shouldExpandAtomicRMWInIR(AI);
-  switch(How) {
-  case TargetLoweringBase::AtomicRMWExpansionKind::Native: return false;
-  case TargetLoweringBase::AtomicRMWExpansionKind::LLSC: {
-    assert(TLI->hasLoadLinkedStoreConditional() &&
-           "TargetLowering requested we expand AtomicRMW instruction into \
-           load-linked/store-conditional combos, but such instruction aren't \
-           supported");
-
-    return expandAtomicRMWToLLSC(AI);
-  }
-  case TargetLoweringBase::AtomicRMWExpansionKind::CmpXChg: {
-    return expandAtomicRMWToCmpXchg(AI);
-  }
+bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
+  switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
+  case TargetLoweringBase::AtomicRMWExpansionKind::None:
+    return false;
+  case TargetLoweringBase::AtomicRMWExpansionKind::LLSC: {
+    assert(TLI->hasLoadLinkedStoreConditional() &&
+           "TargetLowering requested we expand AtomicRMW instruction into "
+           "load-linked/store-conditional combos, but such instructions "
+           "aren't supported");
+
+    return expandAtomicRMWToLLSC(AI);
+  }
+  case TargetLoweringBase::AtomicRMWExpansionKind::CmpXChg: {
+    return expandAtomicRMWToCmpXchg(AI);
+  }
   };
 }
 
Index: lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- lib/Target/AArch64/AArch64ISelLowering.cpp
+++ lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -8775,11 +8775,8 @@
 TargetLoweringBase::AtomicRMWExpansionKind
 AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
-  if (Size <= 128) {
-    return TargetLoweringBase::AtomicRMWExpansionKind::LLSC;
-  } else {
-    return TargetLoweringBase::AtomicRMWExpansionKind::Native;
-  }
+  return Size <= 128 ? AtomicRMWExpansionKind::LLSC
+                     : AtomicRMWExpansionKind::None;
 }
 
 bool AArch64TargetLowering::hasLoadLinkedStoreConditional() const {
Index: lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- lib/Target/ARM/ARMISelLowering.cpp
+++ lib/Target/ARM/ARMISelLowering.cpp
@@ -11103,11 +11103,8 @@
 TargetLoweringBase::AtomicRMWExpansionKind
 ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
-  if (Size <= (Subtarget->isMClass() ? 32U : 64U)) {
-    return TargetLoweringBase::AtomicRMWExpansionKind::LLSC;
-  } else {
-    return TargetLoweringBase::AtomicRMWExpansionKind::Native;
-  }
+  return (Size <= (Subtarget->isMClass() ? 32U : 64U))
+             ? AtomicRMWExpansionKind::LLSC : AtomicRMWExpansionKind::None;
 }
 
 // This has so far only been implemented for MachO.
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -19615,10 +19615,8 @@
   // If the operand is too big, we must see if cmpxchg8/16b is available
   // and default to library calls otherwise.
   if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
-    if (needsCmpXchgNb(MemType))
-      return TargetLoweringBase::AtomicRMWExpansionKind::CmpXChg;
-    else
-      return TargetLoweringBase::AtomicRMWExpansionKind::Native;
+    return needsCmpXchgNb(MemType) ? AtomicRMWExpansionKind::CmpXChg
+                                   : AtomicRMWExpansionKind::None;
   }
 
   AtomicRMWInst::BinOp Op = AI->getOperation();
@@ -19629,16 +19627,14 @@
   case AtomicRMWInst::Add:
   case AtomicRMWInst::Sub:
     // It's better to use xadd, xsub or xchg for these in all cases.
-    return TargetLoweringBase::AtomicRMWExpansionKind::Native;
+    return AtomicRMWExpansionKind::None;
   case AtomicRMWInst::Or:
   case AtomicRMWInst::And:
   case AtomicRMWInst::Xor:
     // If the atomicrmw's result isn't actually used, we can just add a "lock"
     // prefix to a normal instruction for these operations.
-    if (!AI->use_empty())
-      return TargetLoweringBase::AtomicRMWExpansionKind::CmpXChg;
-    else
-      return TargetLoweringBase::AtomicRMWExpansionKind::Native;
+    return !AI->use_empty() ? AtomicRMWExpansionKind::CmpXChg
+                            : AtomicRMWExpansionKind::None;
   case AtomicRMWInst::Nand:
   case AtomicRMWInst::Max:
   case AtomicRMWInst::Min:
@@ -19646,7 +19642,7 @@
   case AtomicRMWInst::UMin:
     // These always require a non-trivial set of data operations on x86. We must
     // use a cmpxchg loop.
-    return TargetLoweringBase::AtomicRMWExpansionKind::CmpXChg;
+    return AtomicRMWExpansionKind::CmpXChg;
   }
 }
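
Usage sketch (not part of the patch): how a hypothetical out-of-tree target could drive AtomicExpandPass through the renamed hook. "MyTargetLowering" is an illustrative name, the 64-bit LL/SC cutoff is an assumption for the example only, and the constructor plus the many other TargetLowering overrides a real target needs are omitted.

#include "llvm/IR/Instructions.h"
#include "llvm/Target/TargetLowering.h"

namespace {
class MyTargetLowering : public llvm::TargetLowering {
public:
  // Returning LLSC below is only legal if this returns true; see the assert
  // in AtomicExpand::tryExpandAtomicRMW above.
  bool hasLoadLinkedStoreConditional() const override { return true; }

  AtomicRMWExpansionKind
  shouldExpandAtomicRMWInIR(llvm::AtomicRMWInst *AI) const override {
    unsigned Size = AI->getType()->getPrimitiveSizeInBits();
    // Assumed native LL/SC support up to 64 bits: expand those atomicrmws to
    // a load-linked/store-conditional loop in IR, and anything wider to a
    // cmpxchg loop. Returning None instead would leave the atomicrmw for the
    // backend to select natively.
    return Size <= 64 ? AtomicRMWExpansionKind::LLSC
                      : AtomicRMWExpansionKind::CmpXChg;
  }
};
} // end anonymous namespace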