Index: include/llvm/CodeGen/ISDOpcodes.h =================================================================== --- include/llvm/CodeGen/ISDOpcodes.h +++ include/llvm/CodeGen/ISDOpcodes.h @@ -806,6 +806,8 @@ ATOMIC_LOAD_MAX, ATOMIC_LOAD_UMIN, ATOMIC_LOAD_UMAX, + ATOMIC_LOAD_FADD, + ATOMIC_LOAD_FSUB, // Masked load and store - consecutive vector load and store operations // with additional mask operand that prevents memory accesses to the Index: include/llvm/CodeGen/SelectionDAGNodes.h =================================================================== --- include/llvm/CodeGen/SelectionDAGNodes.h +++ include/llvm/CodeGen/SelectionDAGNodes.h @@ -1356,6 +1356,8 @@ N->getOpcode() == ISD::ATOMIC_LOAD_MAX || N->getOpcode() == ISD::ATOMIC_LOAD_UMIN || N->getOpcode() == ISD::ATOMIC_LOAD_UMAX || + N->getOpcode() == ISD::ATOMIC_LOAD_FADD || + N->getOpcode() == ISD::ATOMIC_LOAD_FSUB || N->getOpcode() == ISD::ATOMIC_LOAD || N->getOpcode() == ISD::ATOMIC_STORE || N->getOpcode() == ISD::MLOAD || @@ -1408,6 +1410,8 @@ N->getOpcode() == ISD::ATOMIC_LOAD_MAX || N->getOpcode() == ISD::ATOMIC_LOAD_UMIN || N->getOpcode() == ISD::ATOMIC_LOAD_UMAX || + N->getOpcode() == ISD::ATOMIC_LOAD_FADD || + N->getOpcode() == ISD::ATOMIC_LOAD_FSUB || N->getOpcode() == ISD::ATOMIC_LOAD || N->getOpcode() == ISD::ATOMIC_STORE; } Index: include/llvm/Target/TargetSelectionDAG.td =================================================================== --- include/llvm/Target/TargetSelectionDAG.td +++ include/llvm/Target/TargetSelectionDAG.td @@ -259,6 +259,11 @@ def SDTAtomic2 : SDTypeProfile<1, 2, [ SDTCisSameAs<0,2>, SDTCisInt<0>, SDTCisPtrTy<1> ]>; + +def SDTFPAtomic2 : SDTypeProfile<1, 2, [ + SDTCisSameAs<0,2>, SDTCisFP<0>, SDTCisPtrTy<1> +]>; + def SDTAtomicStore : SDTypeProfile<0, 2, [ SDTCisPtrTy<0>, SDTCisInt<1> ]>; @@ -502,6 +507,11 @@ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", SDTAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_fadd : SDNode<"ISD::ATOMIC_LOAD_FADD" , SDTFPAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_fsub : SDNode<"ISD::ATOMIC_LOAD_FSUB" , SDTFPAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; + def atomic_load : SDNode<"ISD::ATOMIC_LOAD", SDTAtomicLoad, [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>; def atomic_store : SDNode<"ISD::ATOMIC_STORE", SDTAtomicStore, Index: lib/CodeGen/AtomicExpandPass.cpp =================================================================== --- lib/CodeGen/AtomicExpandPass.cpp +++ lib/CodeGen/AtomicExpandPass.cpp @@ -548,6 +548,10 @@ case AtomicRMWInst::UMin: NewVal = Builder.CreateICmpULE(Loaded, Inc); return Builder.CreateSelect(NewVal, Loaded, Inc, "new"); + case AtomicRMWInst::FAdd: + return Builder.CreateFAdd(Loaded, Inc, "new"); + case AtomicRMWInst::FSub: + return Builder.CreateFSub(Loaded, Inc, "new"); default: llvm_unreachable("Unknown atomic op"); } @@ -1517,10 +1521,10 @@ case AtomicRMWInst::Min: case AtomicRMWInst::UMax: case AtomicRMWInst::UMin: + case AtomicRMWInst::FAdd: + case AtomicRMWInst::FSub: // No atomic libcalls are available for max/min/umax/umin. 
    return {};
-  default:
-    llvm_unreachable("unhandled operation");
  }
  llvm_unreachable("Unexpected AtomicRMW operation.");
}
Index: lib/CodeGen/SelectionDAG/SelectionDAG.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -6247,6 +6247,8 @@
          Opcode == ISD::ATOMIC_LOAD_MAX ||
          Opcode == ISD::ATOMIC_LOAD_UMIN ||
          Opcode == ISD::ATOMIC_LOAD_UMAX ||
+         Opcode == ISD::ATOMIC_LOAD_FADD ||
+         Opcode == ISD::ATOMIC_LOAD_FSUB ||
          Opcode == ISD::ATOMIC_SWAP ||
          Opcode == ISD::ATOMIC_STORE) &&
         "Invalid Atomic Op");
Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4194,6 +4194,8 @@
  case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
  case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
  case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
+  case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
+  case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
  }
  AtomicOrdering Order = I.getOrdering();
  SyncScope::ID SSID = I.getSyncScopeID();
Index: lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -96,6 +96,7 @@
  case ISD::ATOMIC_LOAD_MAX:  return "AtomicLoadMax";
  case ISD::ATOMIC_LOAD_UMIN: return "AtomicLoadUMin";
  case ISD::ATOMIC_LOAD_UMAX: return "AtomicLoadUMax";
+  case ISD::ATOMIC_LOAD_FADD: return "AtomicLoadFAdd";
  case ISD::ATOMIC_LOAD:      return "AtomicLoad";
  case ISD::ATOMIC_STORE:     return "AtomicStore";
  case ISD::PCMARKER:         return "PCMarker";
Index: lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -470,7 +470,7 @@
  if (isa<AtomicSDNode>(N) ||
      (Opc == AMDGPUISD::ATOMIC_INC || Opc == AMDGPUISD::ATOMIC_DEC ||
-      Opc == AMDGPUISD::ATOMIC_LOAD_FADD ||
+      Opc == ISD::ATOMIC_LOAD_FADD ||
       Opc == AMDGPUISD::ATOMIC_LOAD_FMIN ||
       Opc == AMDGPUISD::ATOMIC_LOAD_FMAX))
    N = glueCopyToM0(N);
Index: lib/Target/AMDGPU/AMDGPUISelLowering.h
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -477,7 +477,6 @@
  ATOMIC_CMP_SWAP,
  ATOMIC_INC,
  ATOMIC_DEC,
-  ATOMIC_LOAD_FADD,
  ATOMIC_LOAD_FMIN,
  ATOMIC_LOAD_FMAX,
  BUFFER_LOAD,
Index: lib/Target/AMDGPU/AMDGPUISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4188,7 +4188,6 @@
  NODE_NAME_CASE(ATOMIC_CMP_SWAP)
  NODE_NAME_CASE(ATOMIC_INC)
  NODE_NAME_CASE(ATOMIC_DEC)
-  NODE_NAME_CASE(ATOMIC_LOAD_FADD)
  NODE_NAME_CASE(ATOMIC_LOAD_FMIN)
  NODE_NAME_CASE(ATOMIC_LOAD_FMAX)
  NODE_NAME_CASE(BUFFER_LOAD)
@@ -4504,7 +4503,12 @@
 TargetLowering::AtomicExpansionKind
 AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
-  if (RMW->getOperation() == AtomicRMWInst::Nand)
+  switch (RMW->getOperation()) {
+  case AtomicRMWInst::Nand:
+  case AtomicRMWInst::FAdd:
+  case AtomicRMWInst::FSub:
    return AtomicExpansionKind::CmpXChg;
-  return AtomicExpansionKind::None;
+  default:
+    return AtomicExpansionKind::None;
+  }
}
Index: lib/Target/AMDGPU/AMDGPUSubtarget.h
===================================================================
--- lib/Target/AMDGPU/AMDGPUSubtarget.h
+++ lib/Target/AMDGPU/AMDGPUSubtarget.h
@@ -779,6 +779,9 @@
    return HasScalarAtomics;
  }

+  bool hasLDSFPAtomics() const {
+    return VIInsts;
+  }

  bool hasDPP() const {
    return HasDPP;
Index: lib/Target/AMDGPU/SIISelLowering.h
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.h
+++ lib/Target/AMDGPU/SIISelLowering.h
@@ -349,6 +349,7 @@
                                     const SelectionDAG &DAG,
                                     bool SNaN = false,
                                     unsigned Depth = 0) const override;
+  AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
};
} // End namespace llvm
Index: lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.cpp
+++ lib/Target/AMDGPU/SIISelLowering.cpp
@@ -699,6 +699,7 @@
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
+  setTargetDAGCombine(ISD::ATOMIC_LOAD_FADD);

  setSchedulingPreference(Sched::RegPressure);
@@ -5193,9 +5194,21 @@
  SDLoc DL(Op);
  switch (IntrID) {
+  case Intrinsic::amdgcn_ds_fadd: {
+    MemSDNode *M = cast<MemSDNode>(Op);
+    unsigned Opc;
+    switch (IntrID) {
+    case Intrinsic::amdgcn_ds_fadd:
+      Opc = ISD::ATOMIC_LOAD_FADD;
+      break;
+    }
+
+    return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(),
+                         M->getOperand(0), M->getOperand(2), M->getOperand(3),
+                         M->getMemOperand());
+  }
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
-  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    MemSDNode *M = cast<MemSDNode>(Op);
    unsigned Opc;
@@ -5207,9 +5220,6 @@
    case Intrinsic::amdgcn_atomic_dec:
      Opc = AMDGPUISD::ATOMIC_DEC;
      break;
-    case Intrinsic::amdgcn_ds_fadd:
-      Opc = AMDGPUISD::ATOMIC_LOAD_FADD;
-      break;
    case Intrinsic::amdgcn_ds_fmin:
      Opc = AMDGPUISD::ATOMIC_LOAD_FMIN;
      break;
@@ -8632,11 +8642,11 @@
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
+  case ISD::ATOMIC_LOAD_FADD:
  case AMDGPUISD::ATOMIC_INC:
  case AMDGPUISD::ATOMIC_DEC:
-  case AMDGPUISD::ATOMIC_LOAD_FADD:
  case AMDGPUISD::ATOMIC_LOAD_FMIN:
-  case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
+  case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
    if (DCI.isBeforeLegalize())
      break;
    return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
@@ -9335,3 +9345,29 @@
  return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG, SNaN,
                                                            Depth);
}
+
+TargetLowering::AtomicExpansionKind
+SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
+  switch (RMW->getOperation()) {
+  case AtomicRMWInst::FAdd: {
+    Type *Ty = RMW->getType();
+
+    // We don't have a way to support 16-bit atomics now, so just leave them
+    // as-is.
+    if (Ty->isHalfTy())
+      return AtomicExpansionKind::None;
+
+    if (!Ty->isFloatTy())
+      return AtomicExpansionKind::CmpXChg;
+
+    // TODO: Do have these for flat. Older targets also had them for buffers.
+    unsigned AS = RMW->getPointerAddressSpace();
+    return (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomics()) ?
+      AtomicExpansionKind::None : AtomicExpansionKind::CmpXChg;
+  }
+  default:
+    break;
+  }
+
+  return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW);
+}
Index: lib/Target/AMDGPU/SIInstrInfo.td
===================================================================
--- lib/Target/AMDGPU/SIInstrInfo.td
+++ lib/Target/AMDGPU/SIInstrInfo.td
@@ -57,10 +57,6 @@
  SDTCisSameAs<0,2>, SDTCisFP<0>, SDTCisPtrTy<1>
]>;

-def SIatomic_fadd : SDNode<"AMDGPUISD::ATOMIC_LOAD_FADD", SDTAtomic2_f32,
-  [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
->;
-
def SIatomic_fmin : SDNode<"AMDGPUISD::ATOMIC_LOAD_FMIN", SDTAtomic2_f32,
  [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
>;
@@ -228,7 +224,7 @@
def atomic_inc_local : local_binary_atomic_op<SIatomic_inc>;
def atomic_dec_local : local_binary_atomic_op<SIatomic_dec>;
-def atomic_load_fadd_local : local_binary_atomic_op<SIatomic_fadd>;
+def atomic_load_fadd_local : local_binary_atomic_op<atomic_load_fadd>;
def atomic_load_fmin_local : local_binary_atomic_op<SIatomic_fmin>;
def atomic_load_fmax_local : local_binary_atomic_op<SIatomic_fmax>;
@@ -424,7 +420,7 @@
defm atomic_load_umin : SIAtomicM0Glue2 <"LOAD_UMIN">;
defm atomic_load_umax : SIAtomicM0Glue2 <"LOAD_UMAX">;
defm atomic_swap : SIAtomicM0Glue2 <"SWAP">;
-defm atomic_load_fadd : SIAtomicM0Glue2 <"LOAD_FADD", 1, SDTAtomic2_f32>;
+defm atomic_load_fadd : SIAtomicM0Glue2 <"LOAD_FADD", 0, SDTAtomic2_f32>;
defm atomic_load_fmin : SIAtomicM0Glue2 <"LOAD_FMIN", 1, SDTAtomic2_f32>;
defm atomic_load_fmax : SIAtomicM0Glue2 <"LOAD_FMAX", 1, SDTAtomic2_f32>;
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -24870,6 +24870,8 @@
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
+  case AtomicRMWInst::FAdd:
+  case AtomicRMWInst::FSub:
    // These always require a non-trivial set of data operations on x86. We must
    // use a cmpxchg loop.
return AtomicExpansionKind::CmpXChg; Index: test/CodeGen/AMDGPU/local-atomics-fp.ll =================================================================== --- /dev/null +++ test/CodeGen/AMDGPU/local-atomics-fp.ll @@ -0,0 +1,109 @@ +; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,GFX678,HAS-ATOMICS %s +; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9,HAS-ATOMICS %s +; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX678,NO-ATOMICS %s +; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX678,NO-ATOMICS %s + +; GCN-LABEL: {{^}}lds_atomic_fadd_ret_f32: +; GFX678-DAG: s_mov_b32 m0 +; GFX9-NOT: m0 +; HAS-ATOMICS-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 4.0 +; HAS-ATOMICS: ds_add_rtn_f32 v0, v0, [[K]] + +; NO-ATOMICS: ds_read_b32 +; NO-ATOMICS: v_add_f32 +; NO-ATOMICS: ds_cmpst_rtn_b32 +; NO-ATOMICS: s_cbranch_execnz +define float @lds_atomic_fadd_ret_f32(float addrspace(3)* %ptr) nounwind { + %result = atomicrmw fadd float addrspace(3)* %ptr, float 4.0 seq_cst + ret float %result +} + +; GCN-LABEL: {{^}}lds_atomic_fadd_noret_f32: +; GFX678-DAG: s_mov_b32 m0 +; GFX9-NOT: m0 +; HAS-ATOMICS-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 4.0 +; HAS-ATOMICS: ds_add_f32 v0, [[K]] +define void @lds_atomic_fadd_noret_f32(float addrspace(3)* %ptr) nounwind { + %result = atomicrmw fadd float addrspace(3)* %ptr, float 4.0 seq_cst + ret void +} + +; GCN-LABEL: {{^}}lds_ds_fadd: +; VI-DAG: s_mov_b32 m0 +; GFX9-NOT: m0 +; HAS-ATOMICS-DAG: v_mov_b32_e32 [[V0:v[0-9]+]], 0x42280000 +; HAS-ATOMICS: ds_add_rtn_f32 [[V2:v[0-9]+]], [[V1:v[0-9]+]], [[V0]] offset:32 +; HAS-ATOMICS: ds_add_f32 [[V3:v[0-9]+]], [[V0]] offset:64 +; HAS-ATOMICS: s_waitcnt lgkmcnt(1) +; HAS-ATOMICS: ds_add_rtn_f32 {{v[0-9]+}}, {{v[0-9]+}}, [[V2]] +define amdgpu_kernel void @lds_ds_fadd(float addrspace(1)* %out, float addrspace(3)* %ptrf, i32 %idx) { + %idx.add = add nuw i32 %idx, 4 + %shl0 = shl i32 %idx.add, 3 + %shl1 = shl i32 %idx.add, 4 + %ptr0 = inttoptr i32 %shl0 to float addrspace(3)* + %ptr1 = inttoptr i32 %shl1 to float addrspace(3)* + %a1 = atomicrmw fadd float addrspace(3)* %ptr0, float 4.2e+1 seq_cst + %a2 = atomicrmw fadd float addrspace(3)* %ptr1, float 4.2e+1 seq_cst + %a3 = atomicrmw fadd float addrspace(3)* %ptrf, float %a1 seq_cst + store float %a3, float addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}lds_atomic_fadd_ret_f64: +; GCN: ds_read_b64 +; GCN: v_add_f64 +; GCN: ds_cmpst_rtn_b64 +; GCN: s_cbranch_execnz +define double @lds_atomic_fadd_ret_f64(double addrspace(3)* %ptr) nounwind { + %result = atomicrmw fadd double addrspace(3)* %ptr, double 4.0 seq_cst + ret double %result +} + +; GCN-LABEL: {{^}}lds_atomic_fadd_noret_f64: +; GCN: ds_read_b64 +; GCN: v_add_f64 +; GCN: ds_cmpst_rtn_b64 +; GCN: s_cbranch_execnz +define void @lds_atomic_fadd_noret_f64(double addrspace(3)* %ptr) nounwind { + %result = atomicrmw fadd double addrspace(3)* %ptr, double 4.0 seq_cst + ret void +} + +; GCN-LABEL: {{^}}lds_atomic_fsub_ret_f32: +; GCN: ds_read_b32 +; GCN: v_sub_f32 +; GCN: ds_cmpst_rtn_b32 +; GCN: s_cbranch_execnz +define float @lds_atomic_fsub_ret_f32(float addrspace(3)* %ptr, float %val) nounwind { + %result = atomicrmw fsub float addrspace(3)* %ptr, float %val seq_cst + ret float %result +} + +; GCN-LABEL: {{^}}lds_atomic_fsub_noret_f32: +; GCN: ds_read_b32 +; GCN: 
v_sub_f32 +; GCN: ds_cmpst_rtn_b32 +define void @lds_atomic_fsub_noret_f32(float addrspace(3)* %ptr, float %val) nounwind { + %result = atomicrmw fsub float addrspace(3)* %ptr, float %val seq_cst + ret void +} + +; GCN-LABEL: {{^}}lds_atomic_fsub_ret_f64: +; GCN: ds_read_b64 +; GCN: v_add_f64 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, -v{{\[[0-9]+:[0-9]+\]}} +; GCN: ds_cmpst_rtn_b64 + +define double @lds_atomic_fsub_ret_f64(double addrspace(3)* %ptr, double %val) nounwind { + %result = atomicrmw fsub double addrspace(3)* %ptr, double %val seq_cst + ret double %result +} + +; GCN-LABEL: {{^}}lds_atomic_fsub_noret_f64: +; GCN: ds_read_b64 +; GCN: v_add_f64 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, -v{{\[[0-9]+:[0-9]+\]}} +; GCN: ds_cmpst_rtn_b64 +; GCN: s_cbranch_execnz +define void @lds_atomic_fsub_noret_f64(double addrspace(3)* %ptr, double %val) nounwind { + %result = atomicrmw fsub double addrspace(3)* %ptr, double %val seq_cst + ret void +} Index: test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll =================================================================== --- /dev/null +++ test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll @@ -0,0 +1,264 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -atomic-expand %s | FileCheck -check-prefix=CI %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s | FileCheck -check-prefix=GFX9 %s + +define float @test_atomicrmw_fadd_f32_flat(float* %ptr, float %value) { +; CI-LABEL: @test_atomicrmw_fadd_f32_flat( +; CI-NEXT: [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4 +; CI-NEXT: br label [[ATOMICRMW_START:%.*]] +; CI: atomicrmw.start: +; CI-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; CI-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]] +; CI-NEXT: [[TMP2:%.*]] = bitcast float* [[PTR]] to i32* +; CI-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32 +; CI-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32 +; CI-NEXT: [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst +; CI-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1 +; CI-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0 +; CI-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float +; CI-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; CI: atomicrmw.end: +; CI-NEXT: ret float [[TMP6]] +; +; GFX9-LABEL: @test_atomicrmw_fadd_f32_flat( +; GFX9-NEXT: [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4 +; GFX9-NEXT: br label [[ATOMICRMW_START:%.*]] +; GFX9: atomicrmw.start: +; GFX9-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; GFX9-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]] +; GFX9-NEXT: [[TMP2:%.*]] = bitcast float* [[PTR]] to i32* +; GFX9-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32 +; GFX9-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32 +; GFX9-NEXT: [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst +; GFX9-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1 +; GFX9-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0 +; GFX9-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float +; GFX9-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; GFX9: atomicrmw.end: +; GFX9-NEXT: ret float [[TMP6]] +; + %res = atomicrmw fadd float* %ptr, float %value 
seq_cst + ret float %res +} + +define float @test_atomicrmw_fadd_f32_global(float addrspace(1)* %ptr, float %value) { +; CI-LABEL: @test_atomicrmw_fadd_f32_global( +; CI-NEXT: [[TMP1:%.*]] = load float, float addrspace(1)* [[PTR:%.*]], align 4 +; CI-NEXT: br label [[ATOMICRMW_START:%.*]] +; CI: atomicrmw.start: +; CI-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; CI-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]] +; CI-NEXT: [[TMP2:%.*]] = bitcast float addrspace(1)* [[PTR]] to i32 addrspace(1)* +; CI-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32 +; CI-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32 +; CI-NEXT: [[TMP5:%.*]] = cmpxchg i32 addrspace(1)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst +; CI-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1 +; CI-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0 +; CI-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float +; CI-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; CI: atomicrmw.end: +; CI-NEXT: ret float [[TMP6]] +; +; GFX9-LABEL: @test_atomicrmw_fadd_f32_global( +; GFX9-NEXT: [[TMP1:%.*]] = load float, float addrspace(1)* [[PTR:%.*]], align 4 +; GFX9-NEXT: br label [[ATOMICRMW_START:%.*]] +; GFX9: atomicrmw.start: +; GFX9-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; GFX9-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]] +; GFX9-NEXT: [[TMP2:%.*]] = bitcast float addrspace(1)* [[PTR]] to i32 addrspace(1)* +; GFX9-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32 +; GFX9-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32 +; GFX9-NEXT: [[TMP5:%.*]] = cmpxchg i32 addrspace(1)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst +; GFX9-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1 +; GFX9-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0 +; GFX9-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float +; GFX9-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; GFX9: atomicrmw.end: +; GFX9-NEXT: ret float [[TMP6]] +; + %res = atomicrmw fadd float addrspace(1)* %ptr, float %value seq_cst + ret float %res +} + +define float @test_atomicrmw_fadd_f32_local(float addrspace(3)* %ptr, float %value) { +; CI-LABEL: @test_atomicrmw_fadd_f32_local( +; CI-NEXT: [[TMP1:%.*]] = load float, float addrspace(3)* [[PTR:%.*]], align 4 +; CI-NEXT: br label [[ATOMICRMW_START:%.*]] +; CI: atomicrmw.start: +; CI-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; CI-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]] +; CI-NEXT: [[TMP2:%.*]] = bitcast float addrspace(3)* [[PTR]] to i32 addrspace(3)* +; CI-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32 +; CI-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32 +; CI-NEXT: [[TMP5:%.*]] = cmpxchg i32 addrspace(3)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst +; CI-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1 +; CI-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0 +; CI-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float +; CI-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; CI: atomicrmw.end: +; CI-NEXT: ret float [[TMP6]] +; +; GFX9-LABEL: @test_atomicrmw_fadd_f32_local( +; GFX9-NEXT: [[RES:%.*]] = atomicrmw fadd float addrspace(3)* [[PTR:%.*]], float [[VALUE:%.*]] seq_cst +; GFX9-NEXT: ret float [[RES]] +; + %res = atomicrmw fadd 
float addrspace(3)* %ptr, float %value seq_cst + ret float %res +} + +define half @test_atomicrmw_fadd_f16_flat(half* %ptr, half %value) { +; CI-LABEL: @test_atomicrmw_fadd_f16_flat( +; CI-NEXT: [[RES:%.*]] = atomicrmw fadd half* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst +; CI-NEXT: ret half [[RES]] +; +; GFX9-LABEL: @test_atomicrmw_fadd_f16_flat( +; GFX9-NEXT: [[RES:%.*]] = atomicrmw fadd half* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst +; GFX9-NEXT: ret half [[RES]] +; + %res = atomicrmw fadd half* %ptr, half %value seq_cst + ret half %res +} + +define half @test_atomicrmw_fadd_f16_global(half addrspace(1)* %ptr, half %value) { +; CI-LABEL: @test_atomicrmw_fadd_f16_global( +; CI-NEXT: [[RES:%.*]] = atomicrmw fadd half addrspace(1)* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst +; CI-NEXT: ret half [[RES]] +; +; GFX9-LABEL: @test_atomicrmw_fadd_f16_global( +; GFX9-NEXT: [[RES:%.*]] = atomicrmw fadd half addrspace(1)* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst +; GFX9-NEXT: ret half [[RES]] +; + %res = atomicrmw fadd half addrspace(1)* %ptr, half %value seq_cst + ret half %res +} + +define half @test_atomicrmw_fadd_f16_local(half addrspace(3)* %ptr, half %value) { +; CI-LABEL: @test_atomicrmw_fadd_f16_local( +; CI-NEXT: [[RES:%.*]] = atomicrmw fadd half addrspace(3)* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst +; CI-NEXT: ret half [[RES]] +; +; GFX9-LABEL: @test_atomicrmw_fadd_f16_local( +; GFX9-NEXT: [[RES:%.*]] = atomicrmw fadd half addrspace(3)* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst +; GFX9-NEXT: ret half [[RES]] +; + %res = atomicrmw fadd half addrspace(3)* %ptr, half %value seq_cst + ret half %res +} + +define double @test_atomicrmw_fadd_f64_flat(double* %ptr, double %value) { +; CI-LABEL: @test_atomicrmw_fadd_f64_flat( +; CI-NEXT: [[TMP1:%.*]] = load double, double* [[PTR:%.*]], align 8 +; CI-NEXT: br label [[ATOMICRMW_START:%.*]] +; CI: atomicrmw.start: +; CI-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; CI-NEXT: [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]] +; CI-NEXT: [[TMP2:%.*]] = bitcast double* [[PTR]] to i64* +; CI-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64 +; CI-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64 +; CI-NEXT: [[TMP5:%.*]] = cmpxchg i64* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst +; CI-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1 +; CI-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0 +; CI-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double +; CI-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; CI: atomicrmw.end: +; CI-NEXT: ret double [[TMP6]] +; +; GFX9-LABEL: @test_atomicrmw_fadd_f64_flat( +; GFX9-NEXT: [[TMP1:%.*]] = load double, double* [[PTR:%.*]], align 8 +; GFX9-NEXT: br label [[ATOMICRMW_START:%.*]] +; GFX9: atomicrmw.start: +; GFX9-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; GFX9-NEXT: [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]] +; GFX9-NEXT: [[TMP2:%.*]] = bitcast double* [[PTR]] to i64* +; GFX9-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64 +; GFX9-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64 +; GFX9-NEXT: [[TMP5:%.*]] = cmpxchg i64* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst +; GFX9-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1 +; GFX9-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0 +; GFX9-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double +; GFX9-NEXT: br i1 [[SUCCESS]], label 
[[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; GFX9: atomicrmw.end: +; GFX9-NEXT: ret double [[TMP6]] +; + %res = atomicrmw fadd double* %ptr, double %value seq_cst + ret double %res +} + +define double @test_atomicrmw_fadd_f64_global(double addrspace(1)* %ptr, double %value) { +; CI-LABEL: @test_atomicrmw_fadd_f64_global( +; CI-NEXT: [[TMP1:%.*]] = load double, double addrspace(1)* [[PTR:%.*]], align 8 +; CI-NEXT: br label [[ATOMICRMW_START:%.*]] +; CI: atomicrmw.start: +; CI-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; CI-NEXT: [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]] +; CI-NEXT: [[TMP2:%.*]] = bitcast double addrspace(1)* [[PTR]] to i64 addrspace(1)* +; CI-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64 +; CI-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64 +; CI-NEXT: [[TMP5:%.*]] = cmpxchg i64 addrspace(1)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst +; CI-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1 +; CI-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0 +; CI-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double +; CI-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; CI: atomicrmw.end: +; CI-NEXT: ret double [[TMP6]] +; +; GFX9-LABEL: @test_atomicrmw_fadd_f64_global( +; GFX9-NEXT: [[TMP1:%.*]] = load double, double addrspace(1)* [[PTR:%.*]], align 8 +; GFX9-NEXT: br label [[ATOMICRMW_START:%.*]] +; GFX9: atomicrmw.start: +; GFX9-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; GFX9-NEXT: [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]] +; GFX9-NEXT: [[TMP2:%.*]] = bitcast double addrspace(1)* [[PTR]] to i64 addrspace(1)* +; GFX9-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64 +; GFX9-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64 +; GFX9-NEXT: [[TMP5:%.*]] = cmpxchg i64 addrspace(1)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst +; GFX9-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1 +; GFX9-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0 +; GFX9-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double +; GFX9-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; GFX9: atomicrmw.end: +; GFX9-NEXT: ret double [[TMP6]] +; + %res = atomicrmw fadd double addrspace(1)* %ptr, double %value seq_cst + ret double %res +} + +define double @test_atomicrmw_fadd_f64_local(double addrspace(3)* %ptr, double %value) { +; CI-LABEL: @test_atomicrmw_fadd_f64_local( +; CI-NEXT: [[TMP1:%.*]] = load double, double addrspace(3)* [[PTR:%.*]], align 8 +; CI-NEXT: br label [[ATOMICRMW_START:%.*]] +; CI: atomicrmw.start: +; CI-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; CI-NEXT: [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]] +; CI-NEXT: [[TMP2:%.*]] = bitcast double addrspace(3)* [[PTR]] to i64 addrspace(3)* +; CI-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64 +; CI-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64 +; CI-NEXT: [[TMP5:%.*]] = cmpxchg i64 addrspace(3)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst +; CI-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1 +; CI-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0 +; CI-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double +; CI-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; CI: atomicrmw.end: +; CI-NEXT: ret double [[TMP6]] +; +; GFX9-LABEL: 
@test_atomicrmw_fadd_f64_local( +; GFX9-NEXT: [[TMP1:%.*]] = load double, double addrspace(3)* [[PTR:%.*]], align 8 +; GFX9-NEXT: br label [[ATOMICRMW_START:%.*]] +; GFX9: atomicrmw.start: +; GFX9-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; GFX9-NEXT: [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]] +; GFX9-NEXT: [[TMP2:%.*]] = bitcast double addrspace(3)* [[PTR]] to i64 addrspace(3)* +; GFX9-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64 +; GFX9-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64 +; GFX9-NEXT: [[TMP5:%.*]] = cmpxchg i64 addrspace(3)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst +; GFX9-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1 +; GFX9-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0 +; GFX9-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double +; GFX9-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; GFX9: atomicrmw.end: +; GFX9-NEXT: ret double [[TMP6]] +; + %res = atomicrmw fadd double addrspace(3)* %ptr, double %value seq_cst + ret double %res +} + Index: test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll =================================================================== --- /dev/null +++ test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll @@ -0,0 +1,201 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -atomic-expand %s | FileCheck -check-prefix=GCN %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s | FileCheck -check-prefix=GCN %s + +define float @test_atomicrmw_fadd_f32_flat(float* %ptr, float %value) { +; GCN-LABEL: @test_atomicrmw_fadd_f32_flat( +; GCN-NEXT: [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4 +; GCN-NEXT: br label [[ATOMICRMW_START:%.*]] +; GCN: atomicrmw.start: +; GCN-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; GCN-NEXT: [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]] +; GCN-NEXT: [[TMP2:%.*]] = bitcast float* [[PTR]] to i32* +; GCN-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32 +; GCN-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32 +; GCN-NEXT: [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst +; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1 +; GCN-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0 +; GCN-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float +; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; GCN: atomicrmw.end: +; GCN-NEXT: ret float [[TMP6]] +; + %res = atomicrmw fsub float* %ptr, float %value seq_cst + ret float %res +} + +define float @test_atomicrmw_fsub_f32_global(float addrspace(1)* %ptr, float %value) { +; GCN-LABEL: @test_atomicrmw_fsub_f32_global( +; GCN-NEXT: [[TMP1:%.*]] = load float, float addrspace(1)* [[PTR:%.*]], align 4 +; GCN-NEXT: br label [[ATOMICRMW_START:%.*]] +; GCN: atomicrmw.start: +; GCN-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; GCN-NEXT: [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]] +; GCN-NEXT: [[TMP2:%.*]] = bitcast float addrspace(1)* [[PTR]] to i32 addrspace(1)* +; GCN-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32 +; GCN-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32 +; GCN-NEXT: [[TMP5:%.*]] = cmpxchg i32 addrspace(1)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst +; GCN-NEXT: 
[[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1 +; GCN-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0 +; GCN-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float +; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; GCN: atomicrmw.end: +; GCN-NEXT: ret float [[TMP6]] +; + %res = atomicrmw fsub float addrspace(1)* %ptr, float %value seq_cst + ret float %res +} + +define float @test_atomicrmw_fsub_f32_local(float addrspace(3)* %ptr, float %value) { +; GCN-LABEL: @test_atomicrmw_fsub_f32_local( +; GCN-NEXT: [[TMP1:%.*]] = load float, float addrspace(3)* [[PTR:%.*]], align 4 +; GCN-NEXT: br label [[ATOMICRMW_START:%.*]] +; GCN: atomicrmw.start: +; GCN-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; GCN-NEXT: [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]] +; GCN-NEXT: [[TMP2:%.*]] = bitcast float addrspace(3)* [[PTR]] to i32 addrspace(3)* +; GCN-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32 +; GCN-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32 +; GCN-NEXT: [[TMP5:%.*]] = cmpxchg i32 addrspace(3)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst +; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1 +; GCN-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0 +; GCN-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float +; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; GCN: atomicrmw.end: +; GCN-NEXT: ret float [[TMP6]] +; + %res = atomicrmw fsub float addrspace(3)* %ptr, float %value seq_cst + ret float %res +} + +define half @test_atomicrmw_fsub_f16_flat(half* %ptr, half %value) { +; GCN-LABEL: @test_atomicrmw_fsub_f16_flat( +; GCN-NEXT: [[TMP1:%.*]] = load half, half* [[PTR:%.*]], align 2 +; GCN-NEXT: br label [[ATOMICRMW_START:%.*]] +; GCN: atomicrmw.start: +; GCN-NEXT: [[LOADED:%.*]] = phi half [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; GCN-NEXT: [[NEW:%.*]] = fsub half [[LOADED]], [[VALUE:%.*]] +; GCN-NEXT: [[TMP2:%.*]] = bitcast half* [[PTR]] to i16* +; GCN-NEXT: [[TMP3:%.*]] = bitcast half [[NEW]] to i16 +; GCN-NEXT: [[TMP4:%.*]] = bitcast half [[LOADED]] to i16 +; GCN-NEXT: [[TMP5:%.*]] = cmpxchg i16* [[TMP2]], i16 [[TMP4]], i16 [[TMP3]] seq_cst seq_cst +; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP5]], 1 +; GCN-NEXT: [[NEWLOADED:%.*]] = extractvalue { i16, i1 } [[TMP5]], 0 +; GCN-NEXT: [[TMP6]] = bitcast i16 [[NEWLOADED]] to half +; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; GCN: atomicrmw.end: +; GCN-NEXT: ret half [[TMP6]] +; + %res = atomicrmw fsub half* %ptr, half %value seq_cst + ret half %res +} + +define half @test_atomicrmw_fsub_f16_global(half addrspace(1)* %ptr, half %value) { +; GCN-LABEL: @test_atomicrmw_fsub_f16_global( +; GCN-NEXT: [[TMP1:%.*]] = load half, half addrspace(1)* [[PTR:%.*]], align 2 +; GCN-NEXT: br label [[ATOMICRMW_START:%.*]] +; GCN: atomicrmw.start: +; GCN-NEXT: [[LOADED:%.*]] = phi half [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; GCN-NEXT: [[NEW:%.*]] = fsub half [[LOADED]], [[VALUE:%.*]] +; GCN-NEXT: [[TMP2:%.*]] = bitcast half addrspace(1)* [[PTR]] to i16 addrspace(1)* +; GCN-NEXT: [[TMP3:%.*]] = bitcast half [[NEW]] to i16 +; GCN-NEXT: [[TMP4:%.*]] = bitcast half [[LOADED]] to i16 +; GCN-NEXT: [[TMP5:%.*]] = cmpxchg i16 addrspace(1)* [[TMP2]], i16 [[TMP4]], i16 [[TMP3]] seq_cst seq_cst +; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP5]], 1 +; 
GCN-NEXT: [[NEWLOADED:%.*]] = extractvalue { i16, i1 } [[TMP5]], 0 +; GCN-NEXT: [[TMP6]] = bitcast i16 [[NEWLOADED]] to half +; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; GCN: atomicrmw.end: +; GCN-NEXT: ret half [[TMP6]] +; + %res = atomicrmw fsub half addrspace(1)* %ptr, half %value seq_cst + ret half %res +} + +define half @test_atomicrmw_fsub_f16_local(half addrspace(3)* %ptr, half %value) { +; GCN-LABEL: @test_atomicrmw_fsub_f16_local( +; GCN-NEXT: [[TMP1:%.*]] = load half, half addrspace(3)* [[PTR:%.*]], align 2 +; GCN-NEXT: br label [[ATOMICRMW_START:%.*]] +; GCN: atomicrmw.start: +; GCN-NEXT: [[LOADED:%.*]] = phi half [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; GCN-NEXT: [[NEW:%.*]] = fsub half [[LOADED]], [[VALUE:%.*]] +; GCN-NEXT: [[TMP2:%.*]] = bitcast half addrspace(3)* [[PTR]] to i16 addrspace(3)* +; GCN-NEXT: [[TMP3:%.*]] = bitcast half [[NEW]] to i16 +; GCN-NEXT: [[TMP4:%.*]] = bitcast half [[LOADED]] to i16 +; GCN-NEXT: [[TMP5:%.*]] = cmpxchg i16 addrspace(3)* [[TMP2]], i16 [[TMP4]], i16 [[TMP3]] seq_cst seq_cst +; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP5]], 1 +; GCN-NEXT: [[NEWLOADED:%.*]] = extractvalue { i16, i1 } [[TMP5]], 0 +; GCN-NEXT: [[TMP6]] = bitcast i16 [[NEWLOADED]] to half +; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; GCN: atomicrmw.end: +; GCN-NEXT: ret half [[TMP6]] +; + %res = atomicrmw fsub half addrspace(3)* %ptr, half %value seq_cst + ret half %res +} + +define double @test_atomicrmw_fsub_f64_flat(double* %ptr, double %value) { +; GCN-LABEL: @test_atomicrmw_fsub_f64_flat( +; GCN-NEXT: [[TMP1:%.*]] = load double, double* [[PTR:%.*]], align 8 +; GCN-NEXT: br label [[ATOMICRMW_START:%.*]] +; GCN: atomicrmw.start: +; GCN-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; GCN-NEXT: [[NEW:%.*]] = fsub double [[LOADED]], [[VALUE:%.*]] +; GCN-NEXT: [[TMP2:%.*]] = bitcast double* [[PTR]] to i64* +; GCN-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64 +; GCN-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64 +; GCN-NEXT: [[TMP5:%.*]] = cmpxchg i64* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst +; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1 +; GCN-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0 +; GCN-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double +; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; GCN: atomicrmw.end: +; GCN-NEXT: ret double [[TMP6]] +; + %res = atomicrmw fsub double* %ptr, double %value seq_cst + ret double %res +} + +define double @test_atomicrmw_fsub_f64_global(double addrspace(1)* %ptr, double %value) { +; GCN-LABEL: @test_atomicrmw_fsub_f64_global( +; GCN-NEXT: [[TMP1:%.*]] = load double, double addrspace(1)* [[PTR:%.*]], align 8 +; GCN-NEXT: br label [[ATOMICRMW_START:%.*]] +; GCN: atomicrmw.start: +; GCN-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; GCN-NEXT: [[NEW:%.*]] = fsub double [[LOADED]], [[VALUE:%.*]] +; GCN-NEXT: [[TMP2:%.*]] = bitcast double addrspace(1)* [[PTR]] to i64 addrspace(1)* +; GCN-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64 +; GCN-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64 +; GCN-NEXT: [[TMP5:%.*]] = cmpxchg i64 addrspace(1)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst +; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1 +; GCN-NEXT: [[NEWLOADED:%.*]] = 
extractvalue { i64, i1 } [[TMP5]], 0 +; GCN-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double +; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; GCN: atomicrmw.end: +; GCN-NEXT: ret double [[TMP6]] +; + %res = atomicrmw fsub double addrspace(1)* %ptr, double %value seq_cst + ret double %res +} + +define double @test_atomicrmw_fsub_f64_local(double addrspace(3)* %ptr, double %value) { +; GCN-LABEL: @test_atomicrmw_fsub_f64_local( +; GCN-NEXT: [[TMP1:%.*]] = load double, double addrspace(3)* [[PTR:%.*]], align 8 +; GCN-NEXT: br label [[ATOMICRMW_START:%.*]] +; GCN: atomicrmw.start: +; GCN-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; GCN-NEXT: [[NEW:%.*]] = fsub double [[LOADED]], [[VALUE:%.*]] +; GCN-NEXT: [[TMP2:%.*]] = bitcast double addrspace(3)* [[PTR]] to i64 addrspace(3)* +; GCN-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64 +; GCN-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64 +; GCN-NEXT: [[TMP5:%.*]] = cmpxchg i64 addrspace(3)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst +; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1 +; GCN-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0 +; GCN-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double +; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; GCN: atomicrmw.end: +; GCN-NEXT: ret double [[TMP6]] +; + %res = atomicrmw fsub double addrspace(3)* %ptr, double %value seq_cst + ret double %res +} Index: test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll =================================================================== --- /dev/null +++ test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll @@ -0,0 +1,112 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -S -mtriple=i686-linux-gnu -atomic-expand %s | FileCheck %s + +define float @test_atomicrmw_fadd_f32(float* %ptr, float %value) { +; CHECK-LABEL: @test_atomicrmw_fadd_f32( +; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4 +; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]] +; CHECK: atomicrmw.start: +; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; CHECK-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[PTR]] to i32* +; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32 +; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32 +; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst +; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1 +; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0 +; CHECK-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float +; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; CHECK: atomicrmw.end: +; CHECK-NEXT: ret float [[TMP6]] +; + %res = atomicrmw fadd float* %ptr, float %value seq_cst + ret float %res +} + +define double @test_atomicrmw_fadd_f64(double* %ptr, double %value) { +; CHECK-LABEL: @test_atomicrmw_fadd_f64( +; CHECK-NEXT: [[TMP1:%.*]] = load double, double* [[PTR:%.*]], align 8 +; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]] +; CHECK: atomicrmw.start: +; CHECK-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; CHECK-NEXT: [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = bitcast double* 
[[PTR]] to i64* +; CHECK-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64 +; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i64* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst +; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1 +; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0 +; CHECK-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double +; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; CHECK: atomicrmw.end: +; CHECK-NEXT: ret double [[TMP6]] +; + %res = atomicrmw fadd double* %ptr, double %value seq_cst + ret double %res +} + +define float @test_atomicrmw_fadd_f32_as1(float addrspace(1)* %ptr, float %value) { +; CHECK-LABEL: @test_atomicrmw_fadd_f32_as1( +; CHECK-NEXT: [[TMP1:%.*]] = load float, float addrspace(1)* [[PTR:%.*]], align 4 +; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]] +; CHECK: atomicrmw.start: +; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; CHECK-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = bitcast float addrspace(1)* [[PTR]] to i32 addrspace(1)* +; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32 +; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32 +; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i32 addrspace(1)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst +; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1 +; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0 +; CHECK-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float +; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; CHECK: atomicrmw.end: +; CHECK-NEXT: ret float [[TMP6]] +; + %res = atomicrmw fadd float addrspace(1)* %ptr, float %value seq_cst + ret float %res +} + +define float @test_atomicrmw_fsub_f32(float* %ptr, float %value) { +; CHECK-LABEL: @test_atomicrmw_fsub_f32( +; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4 +; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]] +; CHECK: atomicrmw.start: +; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; CHECK-NEXT: [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[PTR]] to i32* +; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32 +; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32 +; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst +; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1 +; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0 +; CHECK-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float +; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; CHECK: atomicrmw.end: +; CHECK-NEXT: ret float [[TMP6]] +; + %res = atomicrmw fsub float* %ptr, float %value seq_cst + ret float %res +} + +define double @test_atomicrmw_fsub_f64(double* %ptr, double %value) { +; CHECK-LABEL: @test_atomicrmw_fsub_f64( +; CHECK-NEXT: [[TMP1:%.*]] = load double, double* [[PTR:%.*]], align 8 +; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]] +; CHECK: atomicrmw.start: +; CHECK-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ] +; CHECK-NEXT: [[NEW:%.*]] = fsub double [[LOADED]], [[VALUE:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = bitcast double* [[PTR]] to i64* +; CHECK-NEXT: 
[[TMP3:%.*]] = bitcast double [[NEW]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64 +; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i64* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst +; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1 +; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0 +; CHECK-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double +; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; CHECK: atomicrmw.end: +; CHECK-NEXT: ret double [[TMP6]] +; + %res = atomicrmw fsub double* %ptr, double %value seq_cst + ret double %res +}