diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -554,9 +554,8 @@
   bool preferZeroCompareBranch() const override { return true; }
 
-  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
-    return isa<LoadInst>(I) || isa<StoreInst>(I);
-  }
+  bool shouldInsertFencesForAtomic(const Instruction *I) const override;
+
   Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;
   Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
@@ -637,6 +636,9 @@
   bool isMulAddWithConstProfitable(SDValue AddNode,
                                    SDValue ConstNode) const override;
 
+  TargetLoweringBase::AtomicExpansionKind
+  shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
+
   TargetLowering::AtomicExpansionKind
   shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
   Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI,
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -10093,6 +10093,15 @@
   // Returns condition code of comparison operation.
   ISD::CondCode getCondCode() const { return CCode; }
 };
+
+// returns true for seq_cst stores of 32/64bit
+bool canAmoSwapStoreInst(const StoreInst *SI, const RISCVSubtarget &Subtarget) {
+  unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
+  return EnableSeqCstFence && !Subtarget.hasForcedAtomics() &&
+         SI->getOrdering() == AtomicOrdering::SequentiallyConsistent &&
+         (Size == 32 || Size == 64);
+}
+
 } // namespace
 
 // Verifies conditions to apply an optimization.
@@ -15774,12 +15783,39 @@
   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
     return Builder.CreateFence(AtomicOrdering::Acquire);
-  if (isa<StoreInst>(Inst) &&
-      Ord == AtomicOrdering::SequentiallyConsistent)
-    return Builder.CreateFence(AtomicOrdering::SequentiallyConsistent);
+  if (EnableSeqCstFence && isa<StoreInst>(Inst) &&
+      Ord == AtomicOrdering::SequentiallyConsistent) {
+    // We only use the trailing fence for s{b,h}.
+    // Table A.6 prescribes AMOSWAP for s{w,d}
+    //   fence rw, w
+    //   s{b,h}
+    //   fence rw, rw
+    auto *SI = cast<StoreInst>(Inst);
+    if (!canAmoSwapStoreInst(SI, Subtarget))
+      return Builder.CreateFence(AtomicOrdering::SequentiallyConsistent);
+  }
   return nullptr;
 }
 
+bool RISCVTargetLowering::shouldInsertFencesForAtomic(
+    const Instruction *I) const {
+  if (isa<LoadInst>(I))
+    return true;
+  if (auto *SI = dyn_cast<StoreInst>(I))
+    return !canAmoSwapStoreInst(SI, Subtarget);
+  return false;
+}
+
+TargetLoweringBase::AtomicExpansionKind
+RISCVTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+  if (canAmoSwapStoreInst(SI, Subtarget)) {
+    SI->setOrdering(AtomicOrdering::Release);
+    return AtomicExpansionKind::Expand;
+  }
+  return AtomicExpansionKind::None;
+}
+
 TargetLowering::AtomicExpansionKind
 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
diff --git a/llvm/test/CodeGen/RISCV/atomic-load-store.ll b/llvm/test/CodeGen/RISCV/atomic-load-store.ll
--- a/llvm/test/CodeGen/RISCV/atomic-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-load-store.ll
@@ -1447,26 +1447,22 @@
 ;
 ; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_store_i32_seq_cst:
 ; RV32IA-WMO-TRAILING-FENCE: # %bb.0:
-; RV32IA-WMO-TRAILING-FENCE-NEXT: fence rw, w
-; RV32IA-WMO-TRAILING-FENCE-NEXT: sw a1, 0(a0)
-; RV32IA-WMO-TRAILING-FENCE-NEXT: fence rw, rw
+; RV32IA-WMO-TRAILING-FENCE-NEXT: amoswap.w.rl a0, a1, (a0)
 ; RV32IA-WMO-TRAILING-FENCE-NEXT: ret
 ;
 ; RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_store_i32_seq_cst:
 ; RV32IA-TSO-TRAILING-FENCE: # %bb.0:
-; RV32IA-TSO-TRAILING-FENCE-NEXT: sw a1, 0(a0)
+; RV32IA-TSO-TRAILING-FENCE-NEXT: amoswap.w.rl a0, a1, (a0)
 ; RV32IA-TSO-TRAILING-FENCE-NEXT: ret
 ;
 ; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_store_i32_seq_cst:
 ; RV64IA-WMO-TRAILING-FENCE: # %bb.0:
-; RV64IA-WMO-TRAILING-FENCE-NEXT: fence rw, w
-; RV64IA-WMO-TRAILING-FENCE-NEXT: sw a1, 0(a0)
-; RV64IA-WMO-TRAILING-FENCE-NEXT: fence rw, rw
+; RV64IA-WMO-TRAILING-FENCE-NEXT: amoswap.w.rl a0, a1, (a0)
 ; RV64IA-WMO-TRAILING-FENCE-NEXT: ret
 ;
 ; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_store_i32_seq_cst:
 ; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
-; RV64IA-TSO-TRAILING-FENCE-NEXT: sw a1, 0(a0)
+; RV64IA-TSO-TRAILING-FENCE-NEXT: amoswap.w.rl a0, a1, (a0)
 ; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
   store atomic i32 %b, ptr %a seq_cst, align 4
   ret void
@@ -1650,14 +1646,12 @@
 ;
 ; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_store_i64_seq_cst:
 ; RV64IA-WMO-TRAILING-FENCE: # %bb.0:
-; RV64IA-WMO-TRAILING-FENCE-NEXT: fence rw, w
-; RV64IA-WMO-TRAILING-FENCE-NEXT: sd a1, 0(a0)
-; RV64IA-WMO-TRAILING-FENCE-NEXT: fence rw, rw
+; RV64IA-WMO-TRAILING-FENCE-NEXT: amoswap.d.rl a0, a1, (a0)
 ; RV64IA-WMO-TRAILING-FENCE-NEXT: ret
 ;
 ; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_store_i64_seq_cst:
 ; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
-; RV64IA-TSO-TRAILING-FENCE-NEXT: sd a1, 0(a0)
+; RV64IA-TSO-TRAILING-FENCE-NEXT: amoswap.d.rl a0, a1, (a0)
 ; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
   store atomic i64 %b, ptr %a seq_cst, align 8
   ret void