Index: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp +++ llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp @@ -886,6 +886,10 @@ } void GCNPassConfig::addPreEmitPass() { + addPass(createSIMemoryLegalizerPass()); + addPass(createSIInsertWaitcntsPass()); + addPass(createSIShrinkInstructionsPass()); + // The hazard recognizer that runs as part of the post-ra scheduler does not // guarantee to be able handle all hazards correctly. This is because if there // are multiple scheduling regions in a basic block, the regions are scheduled @@ -894,11 +898,11 @@ // // Here we add a stand-alone hazard recognizer pass which can handle all // cases. + // + // FIXME: This stand-alone pass will emit indiv. S_NOP 0, as needed. It would + // be better for it to emit S_NOP <N> when possible. addPass(&PostRAHazardRecognizerID); - addPass(createSIMemoryLegalizerPass()); - addPass(createSIInsertWaitcntsPass()); - addPass(createSIShrinkInstructionsPass()); addPass(&SIInsertSkipsPassID); addPass(createSIDebuggerInsertNopsPass()); addPass(&BranchRelaxationPassID); Index: llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp +++ llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp @@ -737,7 +737,7 @@ case SIAtomicScope::WAVEFRONT: case SIAtomicScope::SINGLETHREAD: // The L1 cache keeps all memory operations in order for - // wavesfronts in the same work-group. + // wavefronts in the same work-group. 
break; default: llvm_unreachable("Unsupported synchronization scope"); Index: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp.ll +++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp.ll @@ -6,7 +6,9 @@ ; VI-LABEL: {{^}}dpp_test: ; VI: v_mov_b32_e32 v0, s{{[0-9]+}} ; VI-NOOPT: v_mov_b32_e32 v1, s{{[0-9]+}} -; VI: s_nop 1 +; VI-OPT: s_nop 1 +; VI-NOOPT: s_nop 0 +; VI-NOOPT: s_nop 0 ; VI-OPT: v_mov_b32_dpp v0, v0 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x01,0x08,0x11] ; VI-NOOPT: v_mov_b32_dpp v0, v1 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 ; encoding: [0xfa,0x02,0x00,0x7e,0x01,0x01,0x08,0x11] define amdgpu_kernel void @dpp_test(i32 addrspace(1)* %out, i32 %in) { @@ -18,10 +20,14 @@ ; VI-LABEL: {{^}}dpp_wait_states: ; VI-NOOPT: v_mov_b32_e32 [[VGPR1:v[0-9]+]], s{{[0-9]+}} ; VI: v_mov_b32_e32 [[VGPR0:v[0-9]+]], s{{[0-9]+}} -; VI: s_nop 1 +; VI-OPT: s_nop 1 +; VI-NOOPT: s_nop 0 +; VI-NOOPT: s_nop 0 ; VI-OPT: v_mov_b32_dpp [[VGPR0]], [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 -; VI-NOOPT: v_mov_b32_dpp [[VGPR1]], [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 -; VI: s_nop 1 +; VI-NOOPT: v_mov_b32_dpp [[VGPR1]], [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl: +; VI-OPT: s_nop 1 +; VI-NOOPT: s_nop 0 +; VI-NOOPT: s_nop 0 ; VI-OPT: v_mov_b32_dpp v{{[0-9]+}}, [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 ; VI-NOOPT: v_mov_b32_dpp v{{[0-9]+}}, [[VGPR1]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 define amdgpu_kernel void @dpp_wait_states(i32 addrspace(1)* %out, i32 %in) { @@ -35,11 +41,14 @@ ; VI: ; %endif ; VI-OPT: s_mov_b32 ; VI-OPT: s_mov_b32 -; VI-NOOPT: s_nop 1 +; VI-NOOPT: s_waitcnt +; VI-NOOPT-NEXT: s_nop 0 ; VI: v_mov_b32_dpp [[VGPR0:v[0-9]+]], v{{[0-9]+}} 
quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 -; VI: s_nop 1 +; VI-OPT: s_nop 1 ; VI: v_mov_b32_dpp [[VGPR1:v[0-9]+]], [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 -; VI: s_nop 1 +; VI-OPT: s_nop 1 +; VI-NOOPT: s_nop 0 +; VI-NOOPT: s_nop 0 ; VI: v_mov_b32_dpp v{{[0-9]+}}, [[VGPR1]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 define amdgpu_kernel void @dpp_first_in_bb(float addrspace(1)* %out, float addrspace(1)* %in, float %cond, float %a, float %b) { %cmp = fcmp oeq float %cond, 0.0 Index: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll +++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll @@ -4,7 +4,9 @@ ; VI-LABEL: {{^}}dpp_test: ; VI: v_mov_b32_e32 v0, s{{[0-9]+}} ; VI: v_mov_b32_e32 v1, s{{[0-9]+}} -; VI: s_nop 1 +; VI-OPT: s_nop 1 +; VI-NOOPT: s_nop 0 +; VI-NOOPT: s_nop 0 ; VI: v_mov_b32_dpp v0, v1 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 ; encoding: [0xfa,0x02,0x00,0x7e,0x01,0x01,0x08,0x11] define amdgpu_kernel void @dpp_test(i32 addrspace(1)* %out, i32 %in1, i32 %in2) { %tmp0 = call i32 @llvm.amdgcn.update.dpp.i32(i32 %in1, i32 %in2, i32 1, i32 1, i32 1, i1 1) #0 @@ -12,6 +14,33 @@ ret void } +; VI-LABEL: {{^}}dpp_test1: +; VI-OPT: v_add_u32_e32 [[REG:v[0-9]+]], vcc, v{{[0-9]+}}, v{{[0-9]+}} +; VI-NOOPT: v_mov_b32_e32 v{{[0-9]+}}, 0 +; VI-NOOPT: v_mov_b32_e32 [[REG:v[0-9]+]], v{{[0-9]+}} +; VI-NEXT: s_nop 0 +; VI-NEXT: s_nop 0 +; VI-NEXT: v_mov_b32_dpp v2, [[REG]] quad_perm:[1,0,3,2] row_mask:0xf bank_mask:0xf +@0 = internal unnamed_addr addrspace(3) global [448 x i32] undef, align 4 +define weak_odr amdgpu_kernel void @dpp_test1(i32* %arg) local_unnamed_addr { +bb: + %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() + %tmp1 = zext i32 %tmp to i64 + %tmp2 = getelementptr inbounds [448 x i32], [448 x i32] addrspace(3)* @0, i32 0, i32 %tmp + 
%tmp3 = load i32, i32 addrspace(3)* %tmp2, align 4 + fence syncscope("workgroup") release + tail call void @llvm.amdgcn.s.barrier() + fence syncscope("workgroup") acquire + %tmp4 = add nsw i32 %tmp3, %tmp3 + %tmp5 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp4, i32 177, i32 15, i32 15, i1 zeroext false) + %tmp6 = add nsw i32 %tmp5, %tmp4 + %tmp7 = getelementptr inbounds i32, i32* %arg, i64 %tmp1 + store i32 %tmp6, i32* %tmp7, align 4 + ret void +} + +declare i32 @llvm.amdgcn.workitem.id.x() +declare void @llvm.amdgcn.s.barrier() declare i32 @llvm.amdgcn.update.dpp.i32(i32, i32, i32, i32, i32, i1) #0 attributes #0 = { nounwind readnone convergent } Index: llvm/trunk/test/CodeGen/AMDGPU/memory_clause.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/memory_clause.ll +++ llvm/trunk/test/CodeGen/AMDGPU/memory_clause.ll @@ -77,6 +77,7 @@ ; GCN-NEXT: buffer_load_dword ; GCN-NEXT: buffer_load_dword ; GCN-NEXT: s_nop +; GCN-NEXT: s_nop ; GCN-NEXT: buffer_load_dword define void @mubuf_clause(<4 x i32> addrspace(5)* noalias nocapture readonly %arg, <4 x i32> addrspace(5)* noalias nocapture %arg1) { bb: @@ -105,8 +106,9 @@ ; GCN-LABEL: {{^}}vector_clause_indirect: ; GCN: global_load_dwordx2 [[ADDR:v\[[0-9:]+\]]], v[{{[0-9:]+}}], off -; GCN-NEXT: s_nop +; GCN-NEXT: s_nop 0 ; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_nop 0 ; GCN-NEXT: global_load_dwordx4 v[{{[0-9:]+}}], [[ADDR]], off ; GCN-NEXT: global_load_dwordx4 v[{{[0-9:]+}}], [[ADDR]], off offset:16 define amdgpu_kernel void @vector_clause_indirect(i64 addrspace(1)* noalias nocapture readonly %arg, <4 x i32> addrspace(1)* noalias nocapture readnone %arg1, <4 x i32> addrspace(1)* noalias nocapture %arg2) { @@ -128,6 +130,7 @@ ; GCN-LABEL: {{^}}load_global_d16_hi: ; GCN: global_load_short_d16_hi v ; GCN-NEXT: s_nop +; GCN-NEXT: s_nop ; GCN-NEXT: global_load_short_d16_hi v define void @load_global_d16_hi(i16 addrspace(1)* %in, i16 %reg, <2 x i16> 
addrspace(1)* %out) { entry: @@ -147,6 +150,7 @@ ; GCN-LABEL: {{^}}load_global_d16_lo: ; GCN: global_load_short_d16 v ; GCN-NEXT: s_nop +; GCN-NEXT: s_nop ; GCN-NEXT: global_load_short_d16 v define void @load_global_d16_lo(i16 addrspace(1)* %in, i32 %reg, <2 x i16> addrspace(1)* %out) { entry: