Index: llvm/lib/Transforms/Scalar/Sink.cpp
===================================================================
--- llvm/lib/Transforms/Scalar/Sink.cpp
+++ llvm/lib/Transforms/Scalar/Sink.cpp
@@ -141,27 +141,34 @@
       unsigned Num = PHINode::getIncomingValueNumForOperand(U.getOperandNo());
       UseBlock = PN->getIncomingBlock(Num);
     }
-    if (SuccToSinkTo)
+    if (SuccToSinkTo) {
       SuccToSinkTo = DT.findNearestCommonDominator(SuccToSinkTo, UseBlock);
-    else
+      if (!SuccToSinkTo) // findNearestCommonDominator may fail.
+        return false;
+    } else
       SuccToSinkTo = UseBlock;
     // The current basic block needs to dominate the candidate.
     if (!DT.dominates(BB, SuccToSinkTo))
       return false;
   }
 
-  if (SuccToSinkTo) {
-    // The nearest common dominator may be in a parent loop of BB, which may not
-    // be beneficial. Find an ancestor.
-    while (SuccToSinkTo != BB &&
-           !IsAcceptableTarget(Inst, SuccToSinkTo, DT, LI))
-      SuccToSinkTo = DT.getNode(SuccToSinkTo)->getIDom()->getBlock();
-    if (SuccToSinkTo == BB)
-      SuccToSinkTo = nullptr;
+  // No candidate block to sink to.
+  if (!SuccToSinkTo || SuccToSinkTo == BB)
+    return false;
+
+  // The nearest common dominator is the lowest point to which the instruction
+  // may be sunk. While instruction sinking is still an empirical heuristic,
+  // performance regressions related to register pressure have been observed,
+  // so we try to sink the instruction to the earliest possible block.
+  //
+  // Find the block immediately dominated by BB and sink the instruction there.
+  BasicBlock *CurBB = DT.getNode(SuccToSinkTo)->getIDom()->getBlock();
+  while (CurBB != BB) {
+    SuccToSinkTo = CurBB;
+    CurBB = DT.getNode(CurBB)->getIDom()->getBlock();
   }
 
-  // If we couldn't find a block to sink to, ignore this instruction.
-  if (!SuccToSinkTo)
+  if (!IsAcceptableTarget(Inst, SuccToSinkTo, DT, LI))
     return false;
 
   LLVM_DEBUG(dbgs() << "Sink" << *Inst << " (";
Index: llvm/test/CodeGen/AMDGPU/sdiv64.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -240,15 +240,15 @@
 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1
-; GCN-IR-NEXT: BB0_7: ; %udiv-end
+; GCN-IR-NEXT: BB0_7: ; %Flow7
 ; GCN-IR-NEXT: s_xor_b64 s[0:1], s[8:9], s[2:3]
 ; GCN-IR-NEXT: v_xor_b32_e32 v0, s0, v0
 ; GCN-IR-NEXT: v_xor_b32_e32 v1, s1, v1
 ; GCN-IR-NEXT: v_mov_b32_e32 v2, s1
 ; GCN-IR-NEXT: v_subrev_i32_e32 v0, vcc, s0, v0
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc
 ; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
 ; GCN-IR-NEXT: s_mov_b32 s6, -1
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc
 ; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT: s_endpgm
   %result = sdiv i64 %x, %y
@@ -411,26 +411,26 @@
 ; GCN-IR-NEXT: v_ffbh_u32_e32 v7, v10
 ; GCN-IR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v10
 ; GCN-IR-NEXT: v_cndmask_b32_e32 v14, v7, v0, vcc
-; GCN-IR-NEXT: v_sub_i32_e32 v7, vcc, v13, v14
-; GCN-IR-NEXT: v_subb_u32_e64 v8, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[7:8]
-; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[4:5], 63, v[7:8]
+; GCN-IR-NEXT: v_sub_i32_e32 v11, vcc, v13, v14
+; GCN-IR-NEXT: v_subb_u32_e64 v12, s[4:5], 0, 0, vcc
+; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[11:12]
+; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[4:5], 63, v[11:12]
 ; GCN-IR-NEXT: s_or_b64 s[6:7], s[6:7], vcc
 ; GCN-IR-NEXT: s_xor_b64 s[8:9], s[6:7], -1
 ; GCN-IR-NEXT: v_mov_b32_e32 v18, 0
 ; GCN-IR-NEXT: v_mov_b32_e32 v6, v4
 ; GCN-IR-NEXT: v_mov_b32_e32 v1, v5
-; GCN-IR-NEXT: v_cndmask_b32_e64 v12, v10, 0, s[6:7]
+; GCN-IR-NEXT: v_cndmask_b32_e64 v7, v10, 0, s[6:7]
 ; GCN-IR-NEXT: s_and_b64 s[4:5], s[8:9], s[4:5]
 ; GCN-IR-NEXT: v_mov_b32_e32 v15, v18
 ; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v9, 0, s[6:7]
 ; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT: s_cbranch_execz BB1_6
 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v16, vcc, 1, v7
-; GCN-IR-NEXT: v_addc_u32_e32 v17, vcc, 0, v8, vcc
-; GCN-IR-NEXT: v_sub_i32_e64 v0, s[4:5], 63, v7
-; GCN-IR-NEXT: v_cmp_ge_u64_e32 vcc, v[16:17], v[7:8]
+; GCN-IR-NEXT: v_add_i32_e32 v16, vcc, 1, v11
+; GCN-IR-NEXT: v_addc_u32_e32 v17, vcc, 0, v12, vcc
+; GCN-IR-NEXT: v_sub_i32_e64 v0, s[4:5], 63, v11
+; GCN-IR-NEXT: v_cmp_ge_u64_e32 vcc, v[16:17], v[11:12]
 ; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT: v_lshl_b64 v[7:8], v[9:10], v0
 ; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
@@ -480,14 +480,14 @@
 ; GCN-IR-NEXT: BB1_5: ; %Flow3
 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[7:8], 1
-; GCN-IR-NEXT: v_or_b32_e32 v12, v12, v3
+; GCN-IR-NEXT: v_or_b32_e32 v7, v12, v3
 ; GCN-IR-NEXT: v_or_b32_e32 v0, v11, v2
 ; GCN-IR-NEXT: BB1_6: ; %Flow4
 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[6:7]
 ; GCN-IR-NEXT: v_xor_b32_e32 v2, v5, v4
 ; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v2
 ; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v6
-; GCN-IR-NEXT: v_xor_b32_e32 v3, v12, v1
+; GCN-IR-NEXT: v_xor_b32_e32 v3, v7, v1
 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v3, v1, vcc
 ; GCN-IR-NEXT: s_setpc_b64 s[30:31]
@@ -1111,7 +1111,7 @@
 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1
-; GCN-IR-NEXT: BB9_7: ; %udiv-end
+; GCN-IR-NEXT: BB9_7: ; %Flow4
 ; GCN-IR-NEXT: s_xor_b64 s[0:1], s[6:7], s[2:3]
 ; GCN-IR-NEXT: v_xor_b32_e32 v0, s0, v0
 ; GCN-IR-NEXT: v_xor_b32_e32 v1, s1, v1
@@ -1341,9 +1341,9 @@
 ; GCN-IR-NEXT: v_xor_b32_e32 v1, s3, v1
 ; GCN-IR-NEXT: v_mov_b32_e32 v2, s3
 ; GCN-IR-NEXT: v_subrev_i32_e32 v0, vcc, s2, v0
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc
 ; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
 ; GCN-IR-NEXT: s_mov_b32 s6, -1
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc
 ; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT: s_endpgm
   %result = sdiv i64 24, %x
Index: llvm/test/CodeGen/AMDGPU/srem64.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/srem64.ll
+++ llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -218,16 +218,14 @@
 ; GCN-IR-NEXT: v_mul_hi_u32 v2, s2, v0
 ; GCN-IR-NEXT: v_mul_lo_u32 v3, s3, v0
 ; GCN-IR-NEXT: v_mul_lo_u32 v0, s2, v0
-; GCN-IR-NEXT: s_mov_b32 s11, 0xf000
 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v1, v3
 ; GCN-IR-NEXT: v_mov_b32_e32 v2, s7
 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, s6, v0
-; GCN-IR-NEXT: s_mov_b32 s10, -1
-; GCN-IR-NEXT: s_mov_b32 s8, s4
-; GCN-IR-NEXT: s_mov_b32 s9, s5
 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v2, v1, vcc
-; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT: s_mov_b32 s6, -1
+; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT: s_endpgm
   %result = urem i64 %x, %y
   store i64 %result, i64 addrspace(1)* %out
@@ -1028,56 +1026,56 @@
 ; GCN-IR-NEXT: s_ashr_i32 s2, s7, 31
 ; GCN-IR-NEXT: s_ashr_i64 s[10:11], s[0:1], 31
 ; GCN-IR-NEXT: s_ashr_i32 s0, s1, 31
-; GCN-IR-NEXT: s_mov_b32 s1, s0
 ; GCN-IR-NEXT: s_ashr_i64 s[8:9], s[6:7], 31
 ; GCN-IR-NEXT: s_mov_b32 s3, s2
+; GCN-IR-NEXT: s_mov_b32 s1, s0
 ; GCN-IR-NEXT: s_xor_b64 s[6:7], s[8:9], s[2:3]
-; GCN-IR-NEXT: s_xor_b64 s[10:11], s[10:11], s[0:1]
-; GCN-IR-NEXT: s_sub_u32 s8, s6, s2
-; GCN-IR-NEXT: s_subb_u32 s9, s7, s2
-; GCN-IR-NEXT: s_sub_u32 s10, s10, s0
-; GCN-IR-NEXT: s_flbit_i32_b32 s12, s10
-; GCN-IR-NEXT: s_subb_u32 s11, s11, s0
+; GCN-IR-NEXT: s_xor_b64 s[8:9], s[10:11], s[0:1]
+; GCN-IR-NEXT: s_sub_u32 s6, s6, s2
+; GCN-IR-NEXT: s_subb_u32 s7, s7, s2
+; GCN-IR-NEXT: s_sub_u32 s8, s8, s0
+; GCN-IR-NEXT: s_flbit_i32_b32 s12, s8
+; GCN-IR-NEXT: s_subb_u32 s9, s9, s0
 ; GCN-IR-NEXT: s_add_i32 s12, s12, 32
-; GCN-IR-NEXT: s_flbit_i32_b32 s13, s11
+; GCN-IR-NEXT: s_flbit_i32_b32 s13, s9
 ; GCN-IR-NEXT: v_mov_b32_e32 v1, s12
-; GCN-IR-NEXT: s_flbit_i32_b32 s12, s8
+; GCN-IR-NEXT: s_flbit_i32_b32 s12, s6
 ; GCN-IR-NEXT: v_mov_b32_e32 v0, s13
-; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s11, 0
+; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s9, 0
 ; GCN-IR-NEXT: s_add_i32 s12, s12, 32
-; GCN-IR-NEXT: s_flbit_i32_b32 s13, s9
+; GCN-IR-NEXT: s_flbit_i32_b32 s13, s7
 ; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc
 ; GCN-IR-NEXT: v_mov_b32_e32 v0, s13
 ; GCN-IR-NEXT: v_mov_b32_e32 v1, s12
-; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s9, 0
+; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v2, v3
 ; GCN-IR-NEXT: v_subb_u32_e64 v1, s[12:13], 0, 0, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[10:11], 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[8:9], 0
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[8:9], 0
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
 ; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], s[6:7]
+; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], s[10:11]
 ; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc
 ; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT: s_xor_b64 s[6:7], s[0:1], -1
-; GCN-IR-NEXT: s_and_b64 s[6:7], s[6:7], vcc
-; GCN-IR-NEXT: s_and_b64 vcc, exec, s[6:7]
+; GCN-IR-NEXT: s_xor_b64 s[10:11], s[0:1], -1
+; GCN-IR-NEXT: s_and_b64 s[10:11], s[10:11], vcc
+; GCN-IR-NEXT: s_and_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT: s_cbranch_vccz BB8_4
 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 1, v0
 ; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
 ; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[8:9], v0
+; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0
 ; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[0:1]
 ; GCN-IR-NEXT: s_cbranch_vccz BB8_5
 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT: v_not_b32_e32 v2, v2
-; GCN-IR-NEXT: s_add_u32 s6, s10, -1
-; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[8:9], v4
+; GCN-IR-NEXT: s_add_u32 s10, s8, -1
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v4
 ; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, v2, v3
 ; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT: s_addc_u32 s7, s11, -1
+; GCN-IR-NEXT: s_addc_u32 s11, s9, -1
 ; GCN-IR-NEXT: v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
 ; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
@@ -1088,13 +1086,13 @@
 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2
 ; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT: v_mov_b32_e32 v2, s7
-; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s6, v6
+; GCN-IR-NEXT: v_mov_b32_e32 v2, s11
+; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s10, v6
 ; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, v2, v7, vcc
 ; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT: v_and_b32_e32 v10, s10, v8
+; GCN-IR-NEXT: v_and_b32_e32 v10, s8, v8
 ; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT: v_and_b32_e32 v11, s11, v8
+; GCN-IR-NEXT: v_and_b32_e32 v11, s9, v8
 ; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4
 ; GCN-IR-NEXT: v_or_b32_e32 v1, v9, v1
 ; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc
@@ -1109,9 +1107,9 @@
 ; GCN-IR-NEXT: s_cbranch_vccz BB8_3
 ; GCN-IR-NEXT: s_branch BB8_6
 ; GCN-IR-NEXT: BB8_4:
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s9
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s7
 ; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1]
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s8
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s6
 ; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1]
 ; GCN-IR-NEXT: s_branch BB8_7
 ; GCN-IR-NEXT: BB8_5:
@@ -1122,22 +1120,22 @@
 ; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1
 ; GCN-IR-NEXT: BB8_7: ; %udiv-end
-; GCN-IR-NEXT: v_mul_lo_u32 v1, s10, v1
-; GCN-IR-NEXT: v_mul_hi_u32 v2, s10, v0
-; GCN-IR-NEXT: v_mul_lo_u32 v3, s11, v0
-; GCN-IR-NEXT: v_mul_lo_u32 v0, s10, v0
-; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT: v_mul_lo_u32 v1, s8, v1
+; GCN-IR-NEXT: v_mul_hi_u32 v2, s8, v0
+; GCN-IR-NEXT: v_mul_lo_u32 v3, s9, v0
+; GCN-IR-NEXT: v_mul_lo_u32 v0, s8, v0
 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v1, v3
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, s8, v0
-; GCN-IR-NEXT: v_mov_b32_e32 v2, s9
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, s6, v0
+; GCN-IR-NEXT: v_mov_b32_e32 v2, s7
 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v2, v1, vcc
 ; GCN-IR-NEXT: v_xor_b32_e32 v0, s2, v0
 ; GCN-IR-NEXT: v_xor_b32_e32 v1, s3, v1
 ; GCN-IR-NEXT: v_mov_b32_e32 v2, s3
 ; GCN-IR-NEXT: v_subrev_i32_e32 v0, vcc, s2, v0
-; GCN-IR-NEXT: s_mov_b32 s6, -1
 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc
+; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT: s_mov_b32 s6, -1
 ; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT: s_endpgm
   %1 = ashr i64 %x, 31
@@ -1527,11 +1525,11 @@
 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v1, v3
 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 24, v0
-; GCN-IR-NEXT: s_mov_b32 s6, -1
 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT: s_mov_b32 s6, -1
 ; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT: s_endpgm
-  %result = srem i64 24, %x
+  %result = srem i64 24, %x
   store i64 %result, i64 addrspace(1)* %out
   ret void
 }
Index: llvm/test/CodeGen/AMDGPU/urem64.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/urem64.ll
+++ llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -218,16 +218,14 @@
 ; GCN-IR-NEXT: v_mul_hi_u32 v2, s2, v0
 ; GCN-IR-NEXT: v_mul_lo_u32 v3, s3, v0
 ; GCN-IR-NEXT: v_mul_lo_u32 v0, s2, v0
-; GCN-IR-NEXT: s_mov_b32 s11, 0xf000
 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v1, v3
 ; GCN-IR-NEXT: v_mov_b32_e32 v2, s7
 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, s6, v0
-; GCN-IR-NEXT: s_mov_b32 s10, -1
-; GCN-IR-NEXT: s_mov_b32 s8, s4
-; GCN-IR-NEXT: s_mov_b32 s9, s5
 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v2, v1, vcc
-; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT: s_mov_b32 s6, -1
+; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT: s_endpgm
   %result = urem i64 %x, %y
   store i64 %result, i64 addrspace(1)* %out
@@ -940,15 +938,13 @@
 ; GCN-IR-NEXT: v_mul_hi_u32 v2, s6, v0
 ; GCN-IR-NEXT: v_mul_lo_u32 v3, s7, v0
 ; GCN-IR-NEXT: v_mul_lo_u32 v0, s6, v0
-; GCN-IR-NEXT: s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v1, v3
 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 24, v0
-; GCN-IR-NEXT: s_mov_b32 s2, -1
-; GCN-IR-NEXT: s_mov_b32 s0, s4
-; GCN-IR-NEXT: s_mov_b32 s1, s5
 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT: s_mov_b32 s6, -1
+; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT: s_endpgm
   %result = urem i64 24, %x
   store i64 %result, i64 addrspace(1)* %out
@@ -1140,15 +1136,13 @@
 ; GCN-IR-NEXT: v_mul_hi_u32 v2, v0, 24
 ; GCN-IR-NEXT: v_mul_lo_u32 v1, v1, 24
 ; GCN-IR-NEXT: v_mul_lo_u32 v0, v0, 24
-; GCN-IR-NEXT: s_mov_b32 s3, 0xf000
-; GCN-IR-NEXT: s_mov_b32 s2, -1
 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT: v_mov_b32_e32 v2, s7
 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, s6, v0
-; GCN-IR-NEXT: s_mov_b32 s0, s4
-; GCN-IR-NEXT: s_mov_b32 s1, s5
 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v2, v1, vcc
-; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT: s_mov_b32 s6, -1
+; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT: s_endpgm
   %result = urem i64 %x, 24
   store i64 %result, i64 addrspace(1)* %out