Index: llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.h
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.h
+++ llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.h
@@ -38,6 +38,7 @@
                     std::function<bool(MachineInstr *)> IsHazardDef =
                         [](MachineInstr*) {return true;});
+  int checkSMEMSoftClauseHazards(MachineInstr *SMEM);
   int checkSMRDHazards(MachineInstr *SMRD);
   int checkVMEMHazards(MachineInstr* VMEM);
   int checkDPPHazards(MachineInstr *DPP);
Index: llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -132,18 +132,86 @@
 // No-op Hazard Detection
 //===----------------------------------------------------------------------===//
 
+static void addRegsToSet(iterator_range<MachineInstr::mop_iterator> Ops,
+                         std::set<unsigned> &Set) {
+  for (const MachineOperand &Op : Ops) {
+    if (Op.isReg())
+      Set.insert(Op.getReg());
+  }
+}
+
+int GCNHazardRecognizer::checkSMEMSoftClauseHazards(MachineInstr *SMEM) {
+  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
+
+  // SMEM soft clauses are only present on VI+.
+  if (ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
+    return 0;
+
+  // A soft-clause is any group of consecutive SMEM instructions. The
+  // instructions in this group may return out of order and/or may be
+  // replayed (i.e. the same instruction issued more than once).
+  //
+  // In order to handle these situations correctly we need to make sure
+  // that when a clause has more than one instruction, no instruction in the
+  // clause writes to a register that is read by another instruction in the
+  // clause (including itself). If we encounter this situation, we need to
+  // break the clause by inserting a non-SMEM instruction.
+
+  const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(ST.getInstrInfo());
+  std::set<unsigned> ClauseDefs;
+  std::set<unsigned> ClauseUses;
+
+  for (MachineInstr *MI : EmittedInstrs) {
+
+    // When we hit a non-SMEM instruction then we have passed the start of the
+    // clause and we can stop.
+    if (!MI || !TII->isSMRD(*MI))
+      break;
+
+    addRegsToSet(MI->defs(), ClauseDefs);
+    addRegsToSet(MI->uses(), ClauseUses);
+  }
+
+  if (ClauseDefs.empty())
+    return 0;
+
+  // FIXME: When we support stores, we need to make sure not to put loads and
+  // stores in the same clause if they use the same address. For now, just
+  // start a new clause whenever we see a store.
+  if (SMEM->mayStore())
+    return 1;
+
+  addRegsToSet(SMEM->defs(), ClauseDefs);
+  addRegsToSet(SMEM->uses(), ClauseUses);
+
+  std::vector<unsigned> Result(std::max(ClauseDefs.size(), ClauseUses.size()));
+  std::vector<unsigned>::iterator End;
+
+  End = std::set_intersection(ClauseDefs.begin(), ClauseDefs.end(),
+                              ClauseUses.begin(), ClauseUses.end(),
+                              Result.begin());
+
+  // If the set of defs and uses intersect then we cannot add this instruction
+  // to the clause, so we have a hazard.
+  if (End != Result.begin())
+    return 1;
+
+  return 0;
+}
+
 int GCNHazardRecognizer::checkSMRDHazards(MachineInstr *SMRD) {
   const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
   const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(ST.getInstrInfo());
+  int WaitStatesNeeded = 0;
+
+  WaitStatesNeeded = checkSMEMSoftClauseHazards(SMRD);
 
   // This SMRD hazard only affects SI.
   if (ST.getGeneration() != AMDGPUSubtarget::SOUTHERN_ISLANDS)
-    return 0;
+    return WaitStatesNeeded;
 
   // A read of an SGPR by SMRD instruction requires 4 wait states when the
   // SGPR was written by a VALU instruction.
   int SmrdSgprWaitStates = 4;
-  int WaitStatesNeeded = 0;
   auto IsHazardDefFn = [TII] (MachineInstr *MI) { return TII->isVALU(*MI); };
 
   for (const MachineOperand &Use : SMRD->uses()) {
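To make the new check concrete: the hazard test reduces to asking whether the set of registers written inside the clause intersects the set of registers read inside it. Below is a minimal, self-contained C++ sketch of just that test. It is not the LLVM API; the register numbers are invented stand-ins for LLVM's unsigned register IDs, but the std::set_intersection pattern mirrors the patch.

#include <algorithm>
#include <cassert>
#include <set>
#include <vector>

// Returns true when some register is both written and read within the
// clause, i.e. the intersection of defs and uses is non-empty.
static bool clauseHasHazard(const std::set<unsigned> &ClauseDefs,
                            const std::set<unsigned> &ClauseUses) {
  std::vector<unsigned> Result(std::max(ClauseDefs.size(), ClauseUses.size()));
  auto End = std::set_intersection(ClauseDefs.begin(), ClauseDefs.end(),
                                   ClauseUses.begin(), ClauseUses.end(),
                                   Result.begin());
  return End != Result.begin();
}

int main() {
  // Models "s0 = load s[2:3]; s2 = load s[0:1]": s0 and s2 are each
  // written by one load and read by the other, so the clause must break.
  std::set<unsigned> Defs = {0, 2};
  std::set<unsigned> Uses = {0, 1, 2, 3};
  assert(clauseHasHazard(Defs, Uses));

  // Independent loads ("s0 = load s[2:3]; s4 = load s[6:7]") may stay in
  // one clause: no register is both a def and a use.
  assert(!clauseHasHazard({0, 4}, {2, 3, 6, 7}));
  return 0;
}

Because the hazard now fires only on a real overlap, the unconditional s_nop breaks between back-to-back s_load instructions go away, which is what the schedule-kernel-arg-loads.ll update at the end of this patch shows.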
Index: llvm/trunk/lib/Target/AMDGPU/SIInsertWaits.cpp
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInsertWaits.cpp
+++ llvm/trunk/lib/Target/AMDGPU/SIInsertWaits.cpp
@@ -314,8 +314,7 @@
       // and destination registers don't overlap, e.g. this is illegal:
       //   r0 = load r2
       //   r2 = load r0
-      if ((LastOpcodeType == SMEM && TII->isSMRD(*I)) ||
-          (LastOpcodeType == VMEM && Increment.Named.VM)) {
+      if (LastOpcodeType == VMEM && Increment.Named.VM) {
         // Insert a NOP to break the clause.
         BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_NOP))
             .addImm(0);
Index: llvm/trunk/test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll
===================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll
+++ llvm/trunk/test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll
@@ -23,8 +23,8 @@
 ; FUNC-LABEL: {{^}}lds_atomic_cmpxchg_ret_i64_offset:
 ; SICI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
 ; SICI: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
-; VI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
-; VI: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x34
+; VI-DAG: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
+; VI-DAG: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x34
 ; GCN-DAG: v_mov_b32_e32 v[[LOVCMP:[0-9]+]], 7
 ; GCN-DAG: v_mov_b32_e32 v[[HIVCMP:[0-9]+]], 0
 ; GCN-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
@@ -75,8 +75,8 @@
 ; FUNC-LABEL: {{^}}lds_atomic_cmpxchg_noret_i64_offset:
 ; SICI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x9
 ; SICI: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb
-; VI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x24
-; VI: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c
+; VI-DAG: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x24
+; VI-DAG: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c
 ; GCN-DAG: v_mov_b32_e32 v[[LOVCMP:[0-9]+]], 7
 ; GCN-DAG: v_mov_b32_e32 v[[HIVCMP:[0-9]+]], 0
 ; GCN-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
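The SIInsertWaits change above deletes the SMEM half of the clause-breaking condition, since checkSMEMSoftClauseHazards now covers it; only the VMEM half remains. As a rough end-to-end mental model (a toy with invented types, not LLVM's MachineInstr; in the real patch the break comes from the hazard-recognizer machinery, not from this pass), clause breaking amounts to the following scan:

#include <set>
#include <utility>
#include <vector>

// Toy model: walk a straight-line instruction list and insert a NOP
// whenever adding the next SMEM instruction to the current clause would
// make some register both written and read within the clause.
struct Inst {
  bool IsSMEM = false;
  bool IsNOP = false;
  std::set<unsigned> Defs;
  std::set<unsigned> Uses;
};

static bool overlaps(const std::set<unsigned> &A, const std::set<unsigned> &B) {
  for (unsigned R : A)
    if (B.count(R))
      return true;
  return false;
}

std::vector<Inst> breakSMEMClauses(const std::vector<Inst> &In) {
  std::vector<Inst> Out;
  std::set<unsigned> ClauseDefs, ClauseUses;
  for (const Inst &I : In) {
    if (!I.IsSMEM) {
      // Any non-SMEM instruction already ends the clause.
      ClauseDefs.clear();
      ClauseUses.clear();
      Out.push_back(I);
      continue;
    }
    std::set<unsigned> NewDefs = ClauseDefs;
    std::set<unsigned> NewUses = ClauseUses;
    NewDefs.insert(I.Defs.begin(), I.Defs.end());
    NewUses.insert(I.Uses.begin(), I.Uses.end());
    if (!ClauseDefs.empty() && overlaps(NewDefs, NewUses)) {
      // Hazard: break the clause with a NOP, then start a new clause
      // containing only the current instruction.
      Out.push_back(Inst{false, true, {}, {}});
      NewDefs = I.Defs;
      NewUses = I.Uses;
    }
    ClauseDefs = std::move(NewDefs);
    ClauseUses = std::move(NewUses);
    Out.push_back(I);
  }
  return Out;
}

The VI to VI-DAG changes in atomic_cmp_swap_local.ll follow from this: once no s_nop is forced between the two scalar loads, the scheduler is presumably free to emit them in either order, so the checks must match order-independently.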
Index: llvm/trunk/test/CodeGen/AMDGPU/fneg-fabs.f64.ll
===================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/fneg-fabs.f64.ll
+++ llvm/trunk/test/CodeGen/AMDGPU/fneg-fabs.f64.ll
@@ -1,11 +1,11 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN %s
 
 ; FIXME: Check something here. Currently it seems fabs + fneg aren't
 ; folded into 2 modifiers, although theoretically that should work.
 
-; FUNC-LABEL: {{^}}fneg_fabs_fadd_f64:
-; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, -|v{{\[[0-9]+:[0-9]+\]}}|
+; GCN-LABEL: {{^}}fneg_fabs_fadd_f64:
+; GCN: v_add_f64 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, -|v{{\[[0-9]+:[0-9]+\]}}|
 define void @fneg_fabs_fadd_f64(double addrspace(1)* %out, double %x, double %y) {
   %fabs = call double @llvm.fabs.f64(double %x)
   %fsub = fsub double -0.000000e+00, %fabs
@@ -24,8 +24,8 @@
   ret void
 }
 
-; FUNC-LABEL: {{^}}fneg_fabs_fmul_f64:
-; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, -|{{v\[[0-9]+:[0-9]+\]}}|
+; GCN-LABEL: {{^}}fneg_fabs_fmul_f64:
+; GCN: v_mul_f64 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, -|{{v\[[0-9]+:[0-9]+\]}}|
 define void @fneg_fabs_fmul_f64(double addrspace(1)* %out, double %x, double %y) {
   %fabs = call double @llvm.fabs.f64(double %x)
   %fsub = fsub double -0.000000e+00, %fabs
@@ -34,7 +34,7 @@
   ret void
 }
 
-; FUNC-LABEL: {{^}}fneg_fabs_free_f64:
+; GCN-LABEL: {{^}}fneg_fabs_free_f64:
 define void @fneg_fabs_free_f64(double addrspace(1)* %out, i64 %in) {
   %bc = bitcast i64 %in to double
   %fabs = call double @llvm.fabs.f64(double %bc)
@@ -43,9 +43,9 @@
   ret void
 }
 
-; FUNC-LABEL: {{^}}fneg_fabs_fn_free_f64:
-; SI: v_bfrev_b32_e32 [[IMMREG:v[0-9]+]], 1{{$}}
-; SI: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
+; GCN-LABEL: {{^}}fneg_fabs_fn_free_f64:
+; GCN: v_bfrev_b32_e32 [[IMMREG:v[0-9]+]], 1{{$}}
+; GCN: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
 define void @fneg_fabs_fn_free_f64(double addrspace(1)* %out, i64 %in) {
   %bc = bitcast i64 %in to double
   %fabs = call double @fabs(double %bc)
@@ -54,13 +54,14 @@
   ret void
 }
 
-; FUNC-LABEL: {{^}}fneg_fabs_f64:
-; SI: s_load_dwordx2
-; SI: s_load_dwordx2 s{{\[}}[[LO_X:[0-9]+]]:[[HI_X:[0-9]+]]{{\]}}
-; SI: v_bfrev_b32_e32 [[IMMREG:v[0-9]+]], 1{{$}}
-; SI-DAG: v_or_b32_e32 v[[HI_V:[0-9]+]], s[[HI_X]], [[IMMREG]]
-; SI-DAG: v_mov_b32_e32 v[[LO_V:[0-9]+]], s[[LO_X]]
-; SI: buffer_store_dwordx2 v{{\[}}[[LO_V]]:[[HI_V]]{{\]}}
+; GCN-LABEL: {{^}}fneg_fabs_f64:
+; GCN: s_load_dwordx2
+; GCN-DAG: v_bfrev_b32_e32 [[IMMREG:v[0-9]+]], 1{{$}}
+; SI-DAG: s_load_dwordx2 s{{\[}}[[LO_X:[0-9]+]]:[[HI_X:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], 0xb
+; VI-DAG: s_load_dwordx2 s{{\[}}[[LO_X:[0-9]+]]:[[HI_X:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], 0x2c
+; GCN-DAG: v_or_b32_e32 v[[HI_V:[0-9]+]], s[[HI_X]], [[IMMREG]]
+; GCN-DAG: v_mov_b32_e32 v[[LO_V:[0-9]+]], s[[LO_X]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[LO_V]]:[[HI_V]]{{\]}}
 define void @fneg_fabs_f64(double addrspace(1)* %out, double %in) {
   %fabs = call double @llvm.fabs.f64(double %in)
   %fsub = fsub double -0.000000e+00, %fabs
@@ -68,11 +69,11 @@
   ret void
 }
 
-; FUNC-LABEL: {{^}}fneg_fabs_v2f64:
-; SI: v_bfrev_b32_e32 [[IMMREG:v[0-9]+]], 1{{$}}
-; SI-NOT: 0x80000000
-; SI: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
-; SI: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
+; GCN-LABEL: {{^}}fneg_fabs_v2f64:
+; GCN: v_bfrev_b32_e32 [[IMMREG:v[0-9]+]], 1{{$}}
+; GCN-NOT: 0x80000000
+; GCN: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
+; GCN: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
 define void @fneg_fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
   %fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %in)
   %fsub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %fabs
@@ -80,13 +81,13 @@
   ret void
 }
 
-; FUNC-LABEL: {{^}}fneg_fabs_v4f64:
-; SI: v_bfrev_b32_e32 [[IMMREG:v[0-9]+]], 1{{$}}
-; SI-NOT: 0x80000000
-; SI: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
-; SI: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
-; SI: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
-; SI: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
+; GCN-LABEL: {{^}}fneg_fabs_v4f64:
+; GCN: v_bfrev_b32_e32 [[IMMREG:v[0-9]+]], 1{{$}}
+; GCN-NOT: 0x80000000
+; GCN: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
+; GCN: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
+; GCN: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
+; GCN: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
 define void @fneg_fabs_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
   %fabs = call <4 x double> @llvm.fabs.v4f64(<4 x double> %in)
   %fsub = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %fabs
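A note on the immediate offsets that the new SI-DAG/VI-DAG lines pin down, here and in atomic_cmp_swap_local.ll above: assuming the usual encodings, SI-generation SMRD instructions take the kernarg offset in dwords while VI SMEM instructions take it in bytes, so each VI constant is 4x its SI counterpart. A trivial sanity check of the pairs used in these tests:

#include <cassert>

int main() {
  // SI dword offset * 4 == VI byte offset for the same kernel argument
  // (assumes SI SMRD offsets count dwords, VI SMEM offsets count bytes).
  assert(0x9 * 4 == 0x24);
  assert(0xb * 4 == 0x2c);
  assert(0xd * 4 == 0x34);
  assert(0xe * 4 == 0x38);
  return 0;
}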
Index: llvm/trunk/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll
===================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll
+++ llvm/trunk/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll
@@ -2,18 +2,15 @@
 ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=VI --check-prefix=GCN %s
 
 ; FUNC-LABEL: {{^}}cluster_arg_loads:
-; FIXME: Due to changes in the load clustering heuristics. We now longer
-; cluster all argument loads together.
+; FIXME: Due to changes in the load clustering heuristics, we no longer
+; cluster all argument loads together on SI.
 ; SI: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
 ; SI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xe
 ; SI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x9
 ; SI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb
 ; VI: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x34
-; VI-NEXT: s_nop 0
 ; VI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x38
-; VI-NEXT: s_nop 0
-; VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x24
-; VI-NEXT: s_nop 0
+; VI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x24
 ; VI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c
 define void @cluster_arg_loads(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 %x, i32 %y) nounwind {
   store i32 %x, i32 addrspace(1)* %out0, align 4