diff --git a/llvm/docs/Atomics.rst b/llvm/docs/Atomics.rst
--- a/llvm/docs/Atomics.rst
+++ b/llvm/docs/Atomics.rst
@@ -453,10 +453,11 @@ atomic constructs. Here are some lowerings it can do:
 * cmpxchg -> loop with load-linked/store-conditional
-  by overriding ``shouldExpandAtomicCmpXchgInIR()``, ``emitLoadLinked()``,
-  ``emitStoreConditional()``
+  by overriding ``shouldExpandAtomicInstInIR(AtomicCmpXchgInst *)``,
+  ``emitLoadLinked()``, ``emitStoreConditional()``
 * large loads/stores -> ll-sc/cmpxchg
-  by overriding ``shouldExpandAtomicStoreInIR()``/``shouldExpandAtomicLoadInIR()``
+  by overriding ``shouldExpandAtomicInstInIR(StoreInst *)``/
+  ``shouldExpandAtomicInstInIR(LoadInst *)``
 * strong atomic accesses -> monotonic accesses + fences
   by overriding ``shouldInsertFencesForAtomic()``, ``emitLeadingFence()``, and
   ``emitTrailingFence()``
@@ -464,8 +465,10 @@ by overriding ``expandAtomicRMWInIR()``
 * expansion to __atomic_* libcalls for unsupported sizes.
 * part-word atomicrmw/cmpxchg -> target-specific intrinsic by overriding
-  ``shouldExpandAtomicRMWInIR``, ``emitMaskedAtomicRMWIntrinsic``,
-  ``shouldExpandAtomicCmpXchgInIR``, and ``emitMaskedAtomicCmpXchgIntrinsic``.
+  ``shouldExpandAtomicInstInIR(AtomicRMWInst *)``,
+  ``emitMaskedAtomicRMWIntrinsic``,
+  ``shouldExpandAtomicInstInIR(AtomicCmpXchgInst *)``,
+  and ``emitMaskedAtomicCmpXchgIntrinsic``.
 
 For an example of these look at the ARM (first five lowerings) or RISC-V (last lowering) backend.
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -2041,29 +2041,30 @@
   /// Returns how the given (atomic) load should be expanded by the
   /// IR-level AtomicExpand pass.
-  virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
+  virtual AtomicExpansionKind shouldExpandAtomicInstInIR(LoadInst *LI) const {
     return AtomicExpansionKind::None;
   }
 
   /// Returns how the given (atomic) store should be expanded by the IR-level
   /// AtomicExpand pass into. For instance AtomicExpansionKind::Expand will try
   /// to use an atomicrmw xchg.
-  virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+  virtual AtomicExpansionKind shouldExpandAtomicInstInIR(StoreInst *SI) const {
     return AtomicExpansionKind::None;
   }
 
   /// Returns how the given atomic cmpxchg should be expanded by the IR-level
   /// AtomicExpand pass.
   virtual AtomicExpansionKind
-  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
+  shouldExpandAtomicInstInIR(AtomicCmpXchgInst *AI) const {
     return AtomicExpansionKind::None;
   }
 
   /// Returns how the IR-level AtomicExpand pass should expand the given
   /// AtomicRMW, if at all. Default is to never expand.
-  virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
-    return RMW->isFloatingPointOperation() ?
-      AtomicExpansionKind::CmpXChg : AtomicExpansionKind::None;
+  virtual AtomicExpansionKind
+  shouldExpandAtomicInstInIR(AtomicRMWInst *RMW) const {
+    return RMW->isFloatingPointOperation() ? AtomicExpansionKind::CmpXChg
+                                           : AtomicExpansionKind::None;
   }
 
   /// On some platforms, an AtomicRMW that never actually modifies the value
@@ -2075,8 +2076,8 @@
   /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
   /// This method tries doing that transformation, returning the atomic load if
   /// it succeeds, and nullptr otherwise.
-  /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
-  /// another round of expansion.
+  /// If shouldExpandAtomicInstInIR(LoadInst *) requests expansion of that
+  /// load, it will undergo another round of expansion.
   virtual LoadInst *
   lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
     return nullptr;
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -234,7 +234,7 @@
       FenceOrdering = RMWI->getOrdering();
       RMWI->setOrdering(AtomicOrdering::Monotonic);
     } else if (CASI &&
-               TLI->shouldExpandAtomicCmpXchgInIR(CASI) ==
+               TLI->shouldExpandAtomicInstInIR(CASI) ==
                    TargetLoweringBase::AtomicExpansionKind::None &&
                (isReleaseOrStronger(CASI->getSuccessOrdering()) ||
                 isAcquireOrStronger(CASI->getSuccessOrdering()) ||
@@ -400,7 +400,7 @@
 }
 
 bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
-  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
+  switch (TLI->shouldExpandAtomicInstInIR(LI)) {
   case TargetLoweringBase::AtomicExpansionKind::None:
     return false;
   case TargetLoweringBase::AtomicExpansionKind::LLSC:
@@ -422,7 +422,7 @@
 }
 
 bool AtomicExpand::tryExpandAtomicStore(StoreInst *SI) {
-  switch (TLI->shouldExpandAtomicStoreInIR(SI)) {
+  switch (TLI->shouldExpandAtomicInstInIR(SI)) {
   case TargetLoweringBase::AtomicExpansionKind::None:
     return false;
   case TargetLoweringBase::AtomicExpansionKind::Expand:
@@ -507,7 +507,8 @@
   // atomic swap, that can be implemented for example as a ldrex/strex on ARM
   // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
   // It is the responsibility of the target to only signal expansion via
-  // shouldExpandAtomicRMW in cases where this is required and possible.
+  // shouldExpandAtomicInstInIR(AtomicRMWInst *) in cases where this is required
+  // and possible.
   IRBuilder<> Builder(SI);
   AtomicRMWInst *AI = Builder.CreateAtomicRMW(
       AtomicRMWInst::Xchg, SI->getPointerOperand(), SI->getValueOperand(),
@@ -546,7 +547,8 @@
 bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
   LLVMContext &Ctx = AI->getModule()->getContext();
-  TargetLowering::AtomicExpansionKind Kind = TLI->shouldExpandAtomicRMWInIR(AI);
+  TargetLowering::AtomicExpansionKind Kind =
+      TLI->shouldExpandAtomicInstInIR(AI);
   switch (Kind) {
   case TargetLoweringBase::AtomicExpansionKind::None:
     return false;
@@ -1491,7 +1493,7 @@
   unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
   unsigned ValueSize = getAtomicOpSize(CI);
 
-  switch (TLI->shouldExpandAtomicCmpXchgInIR(CI)) {
+  switch (TLI->shouldExpandAtomicInstInIR(CI)) {
   default:
     llvm_unreachable("Unhandled case in tryExpandAtomicCmpXchg");
   case TargetLoweringBase::AtomicExpansionKind::None:
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -676,14 +676,14 @@
   bool shouldInsertFencesForAtomic(const Instruction *I) const override;
 
   TargetLoweringBase::AtomicExpansionKind
-  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
+  shouldExpandAtomicInstInIR(LoadInst *LI) const override;
   TargetLoweringBase::AtomicExpansionKind
-  shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
+  shouldExpandAtomicInstInIR(StoreInst *SI) const override;
   TargetLoweringBase::AtomicExpansionKind
-  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+  shouldExpandAtomicInstInIR(AtomicRMWInst *AI) const override;
   TargetLoweringBase::AtomicExpansionKind
-  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
+  shouldExpandAtomicInstInIR(AtomicCmpXchgInst *AI) const override;

   bool useLoadStackGuardNode() const override;

   TargetLoweringBase::LegalizeTypeAction
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -19672,7 +19672,7 @@
 // are doomed anyway, so defer to the default libcall and blame the OS when
 // things go wrong.
 TargetLoweringBase::AtomicExpansionKind
-AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+AArch64TargetLowering::shouldExpandAtomicInstInIR(StoreInst *SI) const {
   unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
   if (Size != 128 || isOpSuitableForLDPSTP(SI))
     return AtomicExpansionKind::None;
@@ -19683,7 +19683,7 @@
 // are doomed anyway, so defer to the default libcall and blame the OS when
 // things go wrong.
 TargetLowering::AtomicExpansionKind
-AArch64TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
+AArch64TargetLowering::shouldExpandAtomicInstInIR(LoadInst *LI) const {
   unsigned Size = LI->getType()->getPrimitiveSizeInBits();
 
   if (Size != 128 || isOpSuitableForLDPSTP(LI))
@@ -19702,7 +19702,7 @@
 // For the real atomic operations, we have ldxr/stxr up to 128 bits,
 TargetLowering::AtomicExpansionKind
-AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+AArch64TargetLowering::shouldExpandAtomicInstInIR(AtomicRMWInst *AI) const {
   if (AI->isFloatingPointOperation())
     return AtomicExpansionKind::CmpXChg;
 
@@ -19742,7 +19742,7 @@
 }
 
 TargetLowering::AtomicExpansionKind
-AArch64TargetLowering::shouldExpandAtomicCmpXchgInIR(
+AArch64TargetLowering::shouldExpandAtomicInstInIR(
     AtomicCmpXchgInst *AI) const {
   // If subtarget has LSE, leave cmpxchg intact for codegen.
   if (Subtarget->hasLSE() || Subtarget->outlineAtomics())
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -334,7 +334,7 @@
     return MVT::i32;
   }
 
-  AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
+  AtomicExpansionKind shouldExpandAtomicInstInIR(AtomicRMWInst *) const override;
 
   bool isConstantUnsignedBitfieldExtractLegal(unsigned Opc, LLT Ty1,
                                               LLT Ty2) const override;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4794,7 +4794,7 @@
 }
 
 TargetLowering::AtomicExpansionKind
-AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
+AMDGPUTargetLowering::shouldExpandAtomicInstInIR(AtomicRMWInst *RMW) const {
   switch (RMW->getOperation()) {
   case AtomicRMWInst::Nand:
   case AtomicRMWInst::FAdd:
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -473,11 +473,12 @@
                                const SelectionDAG &DAG, bool SNaN = false,
                                unsigned Depth = 0) const override;
-  AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
-  AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
-  AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
   AtomicExpansionKind
-  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
+  shouldExpandAtomicInstInIR(AtomicRMWInst *) const override;
+  AtomicExpansionKind shouldExpandAtomicInstInIR(LoadInst *LI) const override;
+  AtomicExpansionKind shouldExpandAtomicInstInIR(StoreInst *SI) const override;
+  AtomicExpansionKind
+  shouldExpandAtomicInstInIR(AtomicCmpXchgInst *AI) const override;
 
   virtual const TargetRegisterClass *
   getRegClassFor(MVT VT, bool isDivergent) const override;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -12546,7 +12546,7 @@
 }
 
 TargetLowering::AtomicExpansionKind
-SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
+SITargetLowering::shouldExpandAtomicInstInIR(AtomicRMWInst *RMW) const {
   unsigned AS = RMW->getPointerAddressSpace();
   if (AS == AMDGPUAS::PRIVATE_ADDRESS)
     return AtomicExpansionKind::NotAtomic;
@@ -12637,25 +12637,25 @@
     break;
   }
 
-  return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW);
+  return AMDGPUTargetLowering::shouldExpandAtomicInstInIR(RMW);
 }
 
 TargetLowering::AtomicExpansionKind
-SITargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
+SITargetLowering::shouldExpandAtomicInstInIR(LoadInst *LI) const {
   return LI->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS
              ? AtomicExpansionKind::NotAtomic
             : AtomicExpansionKind::None;
 }
 
 TargetLowering::AtomicExpansionKind
-SITargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+SITargetLowering::shouldExpandAtomicInstInIR(StoreInst *SI) const {
   return SI->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS
             ? AtomicExpansionKind::NotAtomic
             : AtomicExpansionKind::None;
 }
 
 TargetLowering::AtomicExpansionKind
-SITargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CmpX) const {
+SITargetLowering::shouldExpandAtomicInstInIR(AtomicCmpXchgInst *CmpX) const {
   return CmpX->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS
             ? AtomicExpansionKind::NotAtomic
            : AtomicExpansionKind::None;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -664,13 +664,13 @@
     bool shouldInsertFencesForAtomic(const Instruction *I) const override;
 
     TargetLoweringBase::AtomicExpansionKind
-    shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
+    shouldExpandAtomicInstInIR(LoadInst *LI) const override;
    TargetLoweringBase::AtomicExpansionKind
-    shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
+    shouldExpandAtomicInstInIR(StoreInst *SI) const override;
    TargetLoweringBase::AtomicExpansionKind
-    shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+    shouldExpandAtomicInstInIR(AtomicRMWInst *AI) const override;
    TargetLoweringBase::AtomicExpansionKind
-    shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
+    shouldExpandAtomicInstInIR(AtomicCmpXchgInst *AI) const override;
 
     bool useLoadStackGuardNode() const override;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -20950,7 +20950,7 @@
 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
 // anything for those.
 TargetLoweringBase::AtomicExpansionKind
-ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+ARMTargetLowering::shouldExpandAtomicInstInIR(StoreInst *SI) const {
   bool has64BitAtomicStore;
   if (Subtarget->isMClass())
     has64BitAtomicStore = false;
@@ -20972,7 +20972,7 @@
 // guarantee, see DDI0406C ARM architecture reference manual,
 // sections A8.8.72-74 LDRD)
 TargetLowering::AtomicExpansionKind
-ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
+ARMTargetLowering::shouldExpandAtomicInstInIR(LoadInst *LI) const {
   bool has64BitAtomicLoad;
   if (Subtarget->isMClass())
     has64BitAtomicLoad = false;
@@ -20989,7 +20989,7 @@
 // For the real atomic operations, we have ldrex/strex up to 32 bits,
 // and up to 64 bits on the non-M profiles
 TargetLowering::AtomicExpansionKind
-ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+ARMTargetLowering::shouldExpandAtomicInstInIR(AtomicRMWInst *AI) const {
   if (AI->isFloatingPointOperation())
     return AtomicExpansionKind::CmpXChg;
 
@@ -21014,10 +21014,10 @@
   return AtomicExpansionKind::None;
 }
 
-// Similar to shouldExpandAtomicRMWInIR, ldrex/strex can be used up to 32
-// bits, and up to 64 bits on the non-M profiles.
+// Similar to shouldExpandAtomicInstInIR(AtomicRMWInst *), ldrex/strex can be
+// used up to 32 bits, and up to 64 bits on the non-M profiles.
 TargetLowering::AtomicExpansionKind
-ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
+ARMTargetLowering::shouldExpandAtomicInstInIR(AtomicCmpXchgInst *AI) const {
   // At -O0, fast-regalloc cannot cope with the live vregs necessary to
   // implement cmpxchg without spilling. If the address being exchanged is also
   // on the stack and close enough to the spill slot, this can lead to a
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.h b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
@@ -327,13 +327,12 @@
                           AtomicOrdering Ord) const override;
   Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
                               AtomicOrdering Ord) const override;
-  AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
-  AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
+  AtomicExpansionKind shouldExpandAtomicInstInIR(LoadInst *LI) const override;
+  AtomicExpansionKind shouldExpandAtomicInstInIR(StoreInst *SI) const override;
   AtomicExpansionKind
-  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
-
+  shouldExpandAtomicInstInIR(AtomicCmpXchgInst *AI) const override;
   AtomicExpansionKind
-  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override {
+  shouldExpandAtomicInstInIR(AtomicRMWInst *AI) const override {
     return AtomicExpansionKind::LLSC;
   }
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -3659,7 +3659,7 @@
 }
 
 TargetLowering::AtomicExpansionKind
-HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
+HexagonTargetLowering::shouldExpandAtomicInstInIR(LoadInst *LI) const {
   // Do not expand loads and stores that don't exceed 64 bits.
   return LI->getType()->getPrimitiveSizeInBits() > 64
              ? AtomicExpansionKind::LLOnly
             : AtomicExpansionKind::None;
 }
@@ -3667,7 +3667,7 @@
 }
 
 TargetLowering::AtomicExpansionKind
-HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+HexagonTargetLowering::shouldExpandAtomicInstInIR(StoreInst *SI) const {
   // Do not expand loads and stores that don't exceed 64 bits.
   return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64
              ? AtomicExpansionKind::Expand
             : AtomicExpansionKind::None;
 }
@@ -3675,7 +3675,6 @@
 }
 
 TargetLowering::AtomicExpansionKind
-HexagonTargetLowering::shouldExpandAtomicCmpXchgInIR(
-    AtomicCmpXchgInst *AI) const {
+HexagonTargetLowering::shouldExpandAtomicInstInIR(AtomicCmpXchgInst *AI) const {
   return AtomicExpansionKind::LLSC;
 }
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -913,10 +913,10 @@
     bool shouldInlineQuadwordAtomics() const;
 
     TargetLowering::AtomicExpansionKind
-    shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+    shouldExpandAtomicInstInIR(AtomicRMWInst *AI) const override;
 
     TargetLowering::AtomicExpansionKind
-    shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
+    shouldExpandAtomicInstInIR(AtomicCmpXchgInst *AI) const override;
 
     Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder,
                                         AtomicRMWInst *AI, Value *AlignedAddr,
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -6238,7 +6238,7 @@
         ArgOffset += PtrByteSize;
         continue;
       }
-      // Copy the object to parameter save area if it can not be entirely passed 
+      // Copy the object to parameter save area if it can not be entirely passed
       // by registers.
      // FIXME: we only need to copy the parts which need to be passed in
      // parameter save area. For the parts passed by registers, we don't need
@@ -6871,7 +6871,7 @@
 //
 // Low Memory +--------------------------------------------+
 // SP   +---> | Back chain                                  | ---+
-// |          +--------------------------------------------+    | 
+// |          +--------------------------------------------+    |
 // |          | Saved Condition Register                    |    |
 // |          +--------------------------------------------+    |
 // |          | Saved Linkage Register                      |    |
@@ -7836,7 +7836,7 @@
     return SDValue();
 
   SDValue N1 = Op.getOperand(0);
-  EVT SrcVT = N1.getValueType(); 
+  EVT SrcVT = N1.getValueType();
   unsigned SrcSize = SrcVT.getSizeInBits();
   if (SrcSize > 256 || !isPowerOf2_32(SrcVT.getVectorNumElements()) ||
@@ -18081,19 +18081,19 @@
 }
 
 TargetLowering::AtomicExpansionKind
-PPCTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+PPCTargetLowering::shouldExpandAtomicInstInIR(AtomicRMWInst *AI) const {
   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
   if (shouldInlineQuadwordAtomics() && Size == 128)
     return AtomicExpansionKind::MaskedIntrinsic;
-  return TargetLowering::shouldExpandAtomicRMWInIR(AI);
+  return TargetLowering::shouldExpandAtomicInstInIR(AI);
 }
 
 TargetLowering::AtomicExpansionKind
-PPCTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
+PPCTargetLowering::shouldExpandAtomicInstInIR(AtomicCmpXchgInst *AI) const {
   unsigned Size = AI->getNewValOperand()->getType()->getPrimitiveSizeInBits();
   if (shouldInlineQuadwordAtomics() && Size == 128)
     return AtomicExpansionKind::MaskedIntrinsic;
-  return TargetLowering::shouldExpandAtomicCmpXchgInIR(AI);
+  return TargetLowering::shouldExpandAtomicInstInIR(AI);
 }
 
 static Intrinsic::ID
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -510,13 +510,13 @@
                               SDValue ConstNode) const override;
 
   TargetLowering::AtomicExpansionKind
-  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+  shouldExpandAtomicInstInIR(AtomicRMWInst *AI) const override;
   Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI,
                                       Value *AlignedAddr, Value *Incr,
                                       Value *Mask, Value *ShiftAmt,
                                       AtomicOrdering Ord) const override;
   TargetLowering::AtomicExpansionKind
-  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override;
+  shouldExpandAtomicInstInIR(AtomicCmpXchgInst *CI) const override;
   Value *emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder,
                                           AtomicCmpXchgInst *CI,
                                           Value *AlignedAddr, Value *CmpVal,
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -11518,7 +11518,7 @@
 }
 
 TargetLowering::AtomicExpansionKind
-RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+RISCVTargetLowering::shouldExpandAtomicInstInIR(AtomicRMWInst *AI) const {
   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
   // point operations can't be used in an lr/sc sequence without breaking the
   // forward-progress guarantee.
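
Illustrative aside, not part of the patch: the sketch below shows how a caller such as AtomicExpandPass reaches the renamed hook. C++ overload resolution selects the shouldExpandAtomicInstInIR variant from the static type of the pointer produced by dyn_cast<>, so the call sites in the AtomicExpandPass.cpp hunks earlier in this patch change only in the callee name. The wantsAtomicExpansion helper and its placement are hypothetical.

// Simplified, self-contained sketch; it assumes the overloaded
// shouldExpandAtomicInstInIR hooks introduced by this patch.
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical helper, not part of AtomicExpandPass.
static bool wantsAtomicExpansion(const TargetLowering &TLI, Instruction &I) {
  using Kind = TargetLoweringBase::AtomicExpansionKind;
  if (auto *LI = dyn_cast<LoadInst>(&I))            // LoadInst overload
    return TLI.shouldExpandAtomicInstInIR(LI) != Kind::None;
  if (auto *SI = dyn_cast<StoreInst>(&I))           // StoreInst overload
    return TLI.shouldExpandAtomicInstInIR(SI) != Kind::None;
  if (auto *RMWI = dyn_cast<AtomicRMWInst>(&I))     // AtomicRMWInst overload
    return TLI.shouldExpandAtomicInstInIR(RMWI) != Kind::None;
  if (auto *CASI = dyn_cast<AtomicCmpXchgInst>(&I)) // AtomicCmpXchgInst overload
    return TLI.shouldExpandAtomicInstInIR(CASI) != Kind::None;
  return false;
}

In the pass itself the calls keep going through a const TargetLowering pointer exactly as in the hunks above; the dispatch on instruction kind stays at the call site, only the callee name changes.
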
@@ -11626,8 +11626,7 @@
 }
 
 TargetLowering::AtomicExpansionKind
-RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
-    AtomicCmpXchgInst *CI) const {
+RISCVTargetLowering::shouldExpandAtomicInstInIR(AtomicCmpXchgInst *CI) const {
   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
   if (Size == 8 || Size == 16)
     return AtomicExpansionKind::MaskedIntrinsic;
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.h b/llvm/lib/Target/Sparc/SparcISelLowering.h
--- a/llvm/lib/Target/Sparc/SparcISelLowering.h
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.h
@@ -201,7 +201,8 @@
       return true;
     }
 
-    AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+    AtomicExpansionKind
+    shouldExpandAtomicInstInIR(AtomicRMWInst *AI) const override;
 
     void ReplaceNodeResults(SDNode *N,
                             SmallVectorImpl<SDValue>& Results,
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1397,7 +1397,8 @@
 // TargetLowering Implementation
 //===----------------------------------------------------------------------===//
 
-TargetLowering::AtomicExpansionKind SparcTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+TargetLowering::AtomicExpansionKind
+SparcTargetLowering::shouldExpandAtomicInstInIR(AtomicRMWInst *AI) const {
   if (AI->getOperation() == AtomicRMWInst::Xchg &&
       AI->getType()->getPrimitiveSizeInBits() == 32)
     return AtomicExpansionKind::None; // Uses xchg instruction
diff --git a/llvm/lib/Target/VE/VEISelLowering.h b/llvm/lib/Target/VE/VEISelLowering.h
--- a/llvm/lib/Target/VE/VEISelLowering.h
+++ b/llvm/lib/Target/VE/VEISelLowering.h
@@ -112,7 +112,7 @@
   Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
                                  AtomicOrdering Ord) const override;
   TargetLoweringBase::AtomicExpansionKind
-  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+  shouldExpandAtomicInstInIR(AtomicRMWInst *AI) const override;
   ISD::NodeType getExtendForAtomicOps() const override {
     return ISD::ANY_EXTEND;
   }
diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp
--- a/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -1121,7 +1121,7 @@
 }
 
 TargetLowering::AtomicExpansionKind
-VETargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+VETargetLowering::shouldExpandAtomicInstInIR(AtomicRMWInst *AI) const {
   // We have TS1AM implementation for i8/i16/i32/i64, so use it.
   if (AI->getOperation() == AtomicRMWInst::Xchg) {
     return AtomicExpansionKind::None;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
@@ -53,7 +53,8 @@
   /// right decision when generating code for different targets.
   const WebAssemblySubtarget *Subtarget;

-  AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
+  AtomicExpansionKind
+  shouldExpandAtomicInstInIR(AtomicRMWInst *) const override;
   bool shouldScalarizeBinop(SDValue VecOp) const override;
   FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
                            const TargetLibraryInfo *LibInfo) const override;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -363,7 +363,7 @@
 }
 
 TargetLowering::AtomicExpansionKind
-WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+WebAssemblyTargetLowering::shouldExpandAtomicInstInIR(AtomicRMWInst *AI) const {
   // We have wasm instructions for these
   switch (AI->getOperation()) {
   case AtomicRMWInst::Add:
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1623,11 +1623,11 @@
     const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
 
     TargetLoweringBase::AtomicExpansionKind
-    shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
+    shouldExpandAtomicInstInIR(LoadInst *LI) const override;
    TargetLoweringBase::AtomicExpansionKind
-    shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
+    shouldExpandAtomicInstInIR(StoreInst *SI) const override;
    TargetLoweringBase::AtomicExpansionKind
-    shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+    shouldExpandAtomicInstInIR(AtomicRMWInst *AI) const override;
    TargetLoweringBase::AtomicExpansionKind
     shouldExpandLogicAtomicRMWInIR(AtomicRMWInst *AI) const;
     void emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const override;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -30472,7 +30472,7 @@
 }
 
 TargetLoweringBase::AtomicExpansionKind
-X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+X86TargetLowering::shouldExpandAtomicInstInIR(StoreInst *SI) const {
   Type *MemType = SI->getValueOperand()->getType();
 
   bool NoImplicitFloatOps =
@@ -30489,7 +30489,7 @@
 // Note: this turns large loads into lock cmpxchg8b/16b.
 // TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
 TargetLowering::AtomicExpansionKind
-X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
+X86TargetLowering::shouldExpandAtomicInstInIR(LoadInst *LI) const {
   Type *MemType = LI->getType();
 
   // If this a 64 bit atomic load on a 32-bit target and SSE2 is enabled, we
@@ -30566,7 +30566,7 @@
 }
 
 TargetLowering::AtomicExpansionKind
-X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+X86TargetLowering::shouldExpandAtomicInstInIR(AtomicRMWInst *AI) const {
   unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
   Type *MemType = AI->getType();
 
@@ -44387,7 +44387,7 @@
   // Attempt to convert a (vXi1 bitcast(iX Cond)) selection mask before it might
   // get split by legalization.
   if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::BITCAST &&
-      CondVT.getVectorElementType() == MVT::i1 && Cond.hasOneUse() && 
+      CondVT.getVectorElementType() == MVT::i1 && Cond.hasOneUse() &&
      TLI.isTypeLegal(VT.getScalarType())) {
     EVT ExtCondVT = VT.changeVectorElementTypeToInteger();
     if (SDValue ExtCond = combineToExtendBoolVectorInReg(
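
Illustrative aside, not part of the patch: a minimal sketch of what an out-of-tree target could look like against the renamed overload set. MyTargetLowering and its expansion choices are assumptions, loosely mirroring the Hexagon and AArch64 patterns above, and a real target would also carry the usual subtarget and lowering plumbing. A target that overrides only some of these overloads and calls the remaining ones unqualified on its own class may additionally want a using TargetLowering::shouldExpandAtomicInstInIR; declaration, because the derived declarations hide the inherited overloads for such calls; dispatch through a TargetLowering pointer, as in AtomicExpandPass, is unaffected by that hiding.

// Hypothetical out-of-tree target, shown only to illustrate the renamed
// overload set introduced by this patch.
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Instructions.h"

namespace llvm {

class MyTargetLowering : public TargetLowering {
public:
  explicit MyTargetLowering(const TargetMachine &TM) : TargetLowering(TM) {}

  // Expand only wide atomic loads/stores; smaller ones stay native.
  AtomicExpansionKind shouldExpandAtomicInstInIR(LoadInst *LI) const override {
    return LI->getType()->getPrimitiveSizeInBits() > 64
               ? AtomicExpansionKind::LLOnly
               : AtomicExpansionKind::None;
  }
  AtomicExpansionKind shouldExpandAtomicInstInIR(StoreInst *SI) const override {
    return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64
               ? AtomicExpansionKind::Expand
               : AtomicExpansionKind::None;
  }

  // Lower all atomicrmw and cmpxchg through load-linked/store-conditional
  // loops, as the ll/sc-style in-tree targets above do.
  AtomicExpansionKind
  shouldExpandAtomicInstInIR(AtomicRMWInst *) const override {
    return AtomicExpansionKind::LLSC;
  }
  AtomicExpansionKind
  shouldExpandAtomicInstInIR(AtomicCmpXchgInst *) const override {
    return AtomicExpansionKind::LLSC;
  }
};

} // namespace llvm
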