Index: lib/Target/AMDGPU/SIInsertSkips.cpp
===================================================================
--- lib/Target/AMDGPU/SIInsertSkips.cpp
+++ lib/Target/AMDGPU/SIInsertSkips.cpp
@@ -137,6 +137,10 @@
       if (TII->hasUnwantedEffectsWhenEXECEmpty(*I))
         return true;
 
+      // These instructions are potentially expensive even if EXEC = 0.
+      if (TII->isSMRD(*I) || TII->isVMEM(*I) || I->getOpcode() == AMDGPU::S_WAITCNT)
+        return true;
+
       ++NumInstr;
       if (NumInstr >= SkipThreshold)
         return true;
Index: test/CodeGen/AMDGPU/collapse-endcf.ll
===================================================================
--- test/CodeGen/AMDGPU/collapse-endcf.ll
+++ test/CodeGen/AMDGPU/collapse-endcf.ll
@@ -7,6 +7,7 @@
 ; GCN-NEXT: s_cbranch_execz [[ENDIF]]
 ; GCN: s_and_b64 exec, exec, vcc
 ; GCN-NEXT: ; mask branch [[ENDIF]]
+; GCN-NEXT: s_cbranch_execz [[ENDIF]]
 ; GCN-NEXT: {{^BB[0-9_]+}}:
 ; GCN: store_dword
 ; GCN-NEXT: {{^}}[[ENDIF]]:
@@ -46,6 +47,7 @@
 ; GCN-NEXT: s_cbranch_execz [[ENDIF_OUTER]]
 ; GCN: s_and_saveexec_b64 [[SAVEEXEC_INNER:s\[[0-9:]+\]]]
 ; GCN-NEXT: ; mask branch [[ENDIF_INNER:BB[0-9_]+]]
+; GCN-NEXT: s_cbranch_execz [[ENDIF_INNER]]
 ; GCN-NEXT: {{^BB[0-9_]+}}:
 ; GCN: store_dword
 ; GCN-NEXT: {{^}}[[ENDIF_INNER]]:
@@ -91,6 +93,7 @@
 ; GCN: s_and_saveexec_b64 [[SAVEEXEC_INNER:s\[[0-9:]+\]]]
 ; GCN-NEXT: s_xor_b64 [[SAVEEXEC_INNER2:s\[[0-9:]+\]]], exec, [[SAVEEXEC_INNER]]
 ; GCN-NEXT: ; mask branch [[THEN_INNER:BB[0-9_]+]]
+; GCN-NEXT: s_cbranch_execz [[THEN_INNER]]
 ; GCN-NEXT: {{^BB[0-9_]+}}:
 ; GCN: store_dword
 ; GCN-NEXT: {{^}}[[THEN_INNER]]:
@@ -140,6 +143,7 @@
 ; GCN: store_dword
 ; GCN-NEXT: s_and_saveexec_b64 [[SAVEEXEC_INNER_IF_OUTER_ELSE:s\[[0-9:]+\]]]
 ; GCN-NEXT: ; mask branch [[THEN_OUTER_FLOW:BB[0-9_]+]]
+; GCN-NEXT: s_cbranch_execz [[THEN_OUTER_FLOW]]
 ; GCN-NEXT: {{^BB[0-9_]+}}:
 ; GCN: store_dword
 ; GCN-NEXT: {{^}}[[THEN_OUTER_FLOW]]:
@@ -153,6 +157,7 @@
 ; GCN: store_dword
 ; GCN-NEXT: s_and_saveexec_b64 [[SAVEEXEC_INNER_IF_OUTER_THEN:s\[[0-9:]+\]]]
 ; GCN-NEXT: ; mask branch [[FLOW1:BB[0-9_]+]]
+; GCN-NEXT: s_cbranch_execz [[FLOW1]]
 ; GCN-NEXT: {{^BB[0-9_]+}}:
 ; GCN: store_dword
 ; GCN-NEXT: [[FLOW1]]:
@@ -199,6 +204,7 @@
 ; ALL-LABEL: {{^}}s_endpgm_unsafe_barrier:
 ; GCN: s_and_saveexec_b64 [[SAVEEXEC:s\[[0-9:]+\]]]
 ; GCN-NEXT: ; mask branch [[ENDIF:BB[0-9_]+]]
+; GCN-NEXT: s_cbranch_execz [[ENDIF]]
 ; GCN-NEXT: {{^BB[0-9_]+}}:
 ; GCN: store_dword
 ; GCN-NEXT: {{^}}[[ENDIF]]:
Index: test/CodeGen/AMDGPU/skip-if-dead.ll
===================================================================
--- test/CodeGen/AMDGPU/skip-if-dead.ll
+++ test/CodeGen/AMDGPU/skip-if-dead.ll
@@ -367,6 +367,7 @@
 ; CHECK: v_cmp_neq_f32_e32 vcc, 0,
 ; CHECK: s_and_saveexec_b64 s{{\[[0-9]+:[0-9]+\]}}, vcc
 ; CHECK: mask branch [[END:BB[0-9]+_[0-9]+]]
+; CHECK-NEXT: s_cbranch_execz [[END]]
 ; CHECK-NOT: branch
 
 ; CHECK: BB{{[0-9]+_[0-9]+}}: ; %bb8
Index: test/CodeGen/AMDGPU/valu-i1.ll
===================================================================
--- test/CodeGen/AMDGPU/valu-i1.ll
+++ test/CodeGen/AMDGPU/valu-i1.ll
@@ -66,6 +66,7 @@
 ; SI: v_cmp_ne_u32_e32 vcc, 0, v{{[0-9]+}}
 ; SI: s_and_saveexec_b64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], vcc
 ; SI-NEXT: ; mask branch [[EXIT:BB[0-9]+_[0-9]+]]
+; SI-NEXT: s_cbranch_execz [[EXIT]]
 ; SI-NEXT: BB{{[0-9]+_[0-9]+}}:
 ; SI: buffer_store_dword
 
@@ -92,6 +93,7 @@
 ; SI: v_cmp_ne_u32_e32 vcc, 0, v{{[0-9]+}}
 ; SI: s_and_saveexec_b64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], vcc
 ; SI-NEXT: ; mask branch [[EXIT:BB[0-9]+_[0-9]+]]
+; SI-NEXT: s_cbranch_execz [[EXIT]]
 ; SI-NEXT: BB{{[0-9]+_[0-9]+}}:
 ; SI: buffer_store_dword
 
@@ -129,6 +131,7 @@
 ; SI-NEXT: s_or_saveexec_b64
 ; SI-NEXT: s_xor_b64 exec, exec
 ; SI-NEXT: ; mask branch [[UNIFIED_RETURN:BB[0-9]+_[0-9]+]]
+; SI-NEXT: s_cbranch_execz [[UNIFIED_RETURN]]
 ; SI-NEXT: {{^BB[0-9]+_[0-9]+}}: ; %then
 ; SI: s_waitcnt
 
Index: test/CodeGen/AMDGPU/wqm.ll
===================================================================
--- test/CodeGen/AMDGPU/wqm.ll
+++ test/CodeGen/AMDGPU/wqm.ll
@@ -423,6 +423,7 @@
 ;CHECK-NEXT: s_and_b64 [[SAVED]], exec, [[SAVED]]
 ;CHECK-NEXT: s_xor_b64 exec, exec, [[SAVED]]
 ;CHECK-NEXT: mask branch [[END_BB:BB[0-9]+_[0-9]+]]
+;CHECK-NEXT: s_cbranch_execz [[END_BB]]
 ;CHECK-NEXT: BB{{[0-9]+_[0-9]+}}: ; %ELSE
 ;CHECK: store_dword
 ;CHECK: [[END_BB]]: ; %END
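
The SIInsertSkips.cpp hunk changes when the pass plants an s_cbranch_execz after a mask branch: the scan over the to-be-skipped region used to force the branch only for instructions with unwanted side effects when EXEC = 0, or once the instruction count reached the skip threshold. With this change, SMRD/VMEM accesses and s_waitcnt also force the branch, since they cost memory latency or wait cycles even when no lanes are active; that is why each mask branch in the updated tests now expects an immediately following s_cbranch_execz. The standalone C++ below is a minimal sketch of that decision loop, not the pass itself: InstrTraits, shouldInsertSkip, and the threshold value 12 are illustrative stand-ins for the queries the real code makes on MachineInstrs through SIInstrInfo.

#include <vector>

// Hypothetical stand-ins for the per-instruction queries in the pass
// (TII->hasUnwantedEffectsWhenEXECEmpty, TII->isSMRD, TII->isVMEM,
// opcode == AMDGPU::S_WAITCNT).
struct InstrTraits {
  bool HasUnwantedEffectsWhenEXECEmpty = false;
  bool IsSMRD = false;    // scalar memory read
  bool IsVMEM = false;    // vector memory access
  bool IsWaitcnt = false; // s_waitcnt
};

// Returns true if the region guarded by the mask branch justifies an
// s_cbranch_execz: any instruction that is unsafe or (after this patch)
// expensive with EXEC = 0 forces the branch, as does exceeding the
// instruction-count threshold.
static bool shouldInsertSkip(const std::vector<InstrTraits> &Region,
                             unsigned SkipThreshold) {
  unsigned NumInstr = 0;
  for (const InstrTraits &I : Region) {
    if (I.HasUnwantedEffectsWhenEXECEmpty)
      return true;

    // The new check: memory operations and waitcnts are treated as
    // expensive even when no lanes are active.
    if (I.IsSMRD || I.IsVMEM || I.IsWaitcnt)
      return true;

    if (++NumInstr >= SkipThreshold)
      return true;
  }
  return false;
}

int main() {
  // A short region containing one VMEM access: well below the assumed
  // threshold, so the old heuristic would not skip it, but the memory
  // access alone now forces the skip branch.
  std::vector<InstrTraits> Region(3);
  Region[1].IsVMEM = true;
  return shouldInsertSkip(Region, /*SkipThreshold=*/12) ? 0 : 1;
}

The intent, as far as the diff shows, is that a block containing even a single memory operation is worth branching around when the wave has no active lanes, rather than issuing the load/store and the waits for it and relying on the instruction-count threshold alone.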