diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp
--- a/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -1657,6 +1657,16 @@
     return CopyMI;
   }
 
+  // Keep undef subreg copies if they are used by early-clobber instructions.
+  if (DstSubIdx) {
+    for (const MachineInstr &Use : MRI->use_nodbg_instructions(DstReg)) {
+      for (const MachineOperand &Def : Use.defs()) {
+        if (Def.isEarlyClobber())
+          return nullptr;
+      }
+    }
+  }
+
   // Remove any DstReg segments starting at the instruction.
   LLVM_DEBUG(dbgs() << "\tEliminating copy of value\n");
diff --git a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
--- a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -1905,9 +1905,20 @@
     MachineOperand &UseMO = MI.getOperand(i);
     Register SrcReg = UseMO.getReg();
     unsigned SubIdx = MI.getOperand(i+1).getImm();
-    // Nothing needs to be inserted for undef operands.
-    if (UseMO.isUndef())
-      continue;
+
+    // Nothing needs to be inserted for undef operands unless the register is
+    // used by an early-clobber instruction.
+    bool UndefUseByEC = false;
+    if (UseMO.isUndef()) {
+      for (const MachineInstr &Use : MRI->use_nodbg_instructions(DstReg)) {
+        for (const MachineOperand &Def : Use.defs()) {
+          if (Def.isEarlyClobber())
+            UndefUseByEC = true;
+        }
+      }
+      if (!UndefUseByEC)
+        continue;
+    }
 
     // Defer any kill flag to the last operand using SrcReg. Otherwise, we
     // might insert a COPY that uses SrcReg after is was killed.
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_buffer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_buffer.ll
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_buffer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_buffer.ll
@@ -393,6 +393,7 @@
 ; GFX11W64-NEXT: s_or_b64 exec, exec, s[6:7]
 ; GFX11W64-NEXT: s_waitcnt vmcnt(0)
 ; GFX11W64-NEXT: v_readfirstlane_b32 s0, v1
+; GFX11W64-NEXT: ; kill: def $sgpr1 killed $sgpr1
 ; GFX11W64-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11W64-NEXT: s_delay_alu instid0(VALU_DEP_1)
 ; GFX11W64-NEXT: v_mad_u64_u32 v[1:2], null, s8, v0, s[0:1]
@@ -425,6 +426,7 @@
 ; GFX11W32-NEXT: s_or_b32 exec_lo, exec_lo, s5
 ; GFX11W32-NEXT: s_waitcnt vmcnt(0)
 ; GFX11W32-NEXT: v_readfirstlane_b32 s0, v1
+; GFX11W32-NEXT: ; kill: def $sgpr1 killed $sgpr1
 ; GFX11W32-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11W32-NEXT: s_delay_alu instid0(VALU_DEP_1)
 ; GFX11W32-NEXT: v_mad_u64_u32 v[1:2], null, s4, v0, s[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
@@ -441,6 +441,7 @@
 ; GFX1164-NEXT: .LBB1_2:
 ; GFX1164-NEXT: s_or_b64 exec, exec, s[0:1]
 ; GFX1164-NEXT: v_readfirstlane_b32 s0, v1
+; GFX1164-NEXT: ; kill: def $sgpr1 killed $sgpr1
 ; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX1164-NEXT: s_mov_b32 s7, 0x31016000
 ; GFX1164-NEXT: s_mov_b32 s6, -1
@@ -480,6 +481,7 @@
 ; GFX1132-NEXT: .LBB1_2:
 ; GFX1132-NEXT: s_or_b32 exec_lo, exec_lo, s1
 ; GFX1132-NEXT: v_readfirstlane_b32 s2, v1
+; GFX1132-NEXT: ; kill: def $sgpr3 killed $sgpr3
 ; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX1132-NEXT: s_mov_b32 s7, 0x31016000
 ; GFX1132-NEXT: s_mov_b32 s6, -1
@@ -1378,14 +1380,16 @@
 ; GFX1164-NEXT: s_or_b64 exec, exec, s[2:3]
 ; GFX1164-NEXT: v_readfirstlane_b32 s2, v0
 ; GFX1164-NEXT: v_readfirstlane_b32 s3, v1
+; GFX1164-NEXT: ; kill: def $vgpr4 killed $vgpr4
 ; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX1164-NEXT: s_mov_b32 s7, 0x31016000
 ; GFX1164-NEXT: s_mov_b32 s6, -1
 ; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX1164-NEXT: v_mad_u64_u32 v[0:1], null, s0, v2, s[2:3]
-; GFX1164-NEXT: v_mad_u64_u32 v[3:4], null, s1, v2, v[1:2]
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_mov_b32_e32 v1, v3
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mad_u64_u32 v[5:6], null, s1, v2, v[3:4]
+; GFX1164-NEXT: v_mov_b32_e32 v1, v5
 ; GFX1164-NEXT: buffer_store_b64 v[0:1], off, s[4:7], 0
 ; GFX1164-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1164-NEXT: s_endpgm
@@ -1425,14 +1429,16 @@
 ; GFX1132-NEXT: s_or_b32 exec_lo, exec_lo, s2
 ; GFX1132-NEXT: v_readfirstlane_b32 s2, v0
 ; GFX1132-NEXT: v_readfirstlane_b32 s3, v1
+; GFX1132-NEXT: ; kill: def $vgpr4 killed $vgpr4
 ; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX1132-NEXT: s_mov_b32 s7, 0x31016000
 ; GFX1132-NEXT: s_mov_b32 s6, -1
 ; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX1132-NEXT: v_mad_u64_u32 v[0:1], null, s0, v2, s[2:3]
-; GFX1132-NEXT: v_mad_u64_u32 v[3:4], null, s1, v2, v[1:2]
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_mov_b32_e32 v1, v3
+; GFX1132-NEXT: v_mov_b32_e32 v3, v1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: v_mad_u64_u32 v[5:6], null, s1, v2, v[3:4]
+; GFX1132-NEXT: v_mov_b32_e32 v1, v5
 ; GFX1132-NEXT: buffer_store_b64 v[0:1], off, s[4:7], 0
 ; GFX1132-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1132-NEXT: s_endpgm
@@ -3007,15 +3013,16 @@
 ; GFX1164-NEXT: s_or_b64 exec, exec, s[2:3]
 ; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX1164-NEXT: v_mad_u64_u32 v[3:4], null, s0, v2, 0
+; GFX1164-NEXT: ; kill: def $vgpr5 killed $vgpr5
 ; GFX1164-NEXT: v_readfirstlane_b32 s0, v0
 ; GFX1164-NEXT: s_mov_b32 s7, 0x31016000
 ; GFX1164-NEXT: s_mov_b32 s6, -1
 ; GFX1164-NEXT: s_waitcnt_depctr 0xfff
-; GFX1164-NEXT: v_mad_u64_u32 v[5:6], null, s1, v2, v[4:5]
+; GFX1164-NEXT: v_mad_u64_u32 v[6:7], null, s1, v2, v[4:5]
 ; GFX1164-NEXT: v_readfirstlane_b32 s1, v1
 ; GFX1164-NEXT: v_sub_co_u32 v0, vcc, s0, v3
 ; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1164-NEXT: v_mov_b32_e32 v1, v5
+; GFX1164-NEXT: v_mov_b32_e32 v1, v6
 ; GFX1164-NEXT: v_sub_co_ci_u32_e32 v1, vcc, s1, v1, vcc
 ; GFX1164-NEXT: buffer_store_b64 v[0:1], off, s[4:7], 0
 ; GFX1164-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -3056,15 +3063,16 @@
 ; GFX1132-NEXT: s_or_b32 exec_lo, exec_lo, s2
 ; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX1132-NEXT: v_mad_u64_u32 v[3:4], null, s0, v2, 0
+; GFX1132-NEXT: ; kill: def $vgpr5 killed $vgpr5
 ; GFX1132-NEXT: v_readfirstlane_b32 s0, v0
 ; GFX1132-NEXT: s_mov_b32 s7, 0x31016000
 ; GFX1132-NEXT: s_mov_b32 s6, -1
 ; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX1132-NEXT: v_mad_u64_u32 v[5:6], null, s1, v2, v[4:5]
+; GFX1132-NEXT: v_mad_u64_u32 v[6:7], null, s1, v2, v[4:5]
 ; GFX1132-NEXT: v_readfirstlane_b32 s1, v1
 ; GFX1132-NEXT: v_sub_co_u32 v0, vcc_lo, s0, v3
 ; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1132-NEXT: v_mov_b32_e32 v1, v5
+; GFX1132-NEXT: v_mov_b32_e32 v1, v6
 ; GFX1132-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
 ; GFX1132-NEXT: buffer_store_b64 v[0:1], off, s[4:7], 0
 ; GFX1132-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
@@ -430,6 +430,7 @@
 ; GFX1164-NEXT: .LBB1_2:
 ; GFX1164-NEXT: s_or_b64 exec, exec, s[0:1]
 ; GFX1164-NEXT: v_readfirstlane_b32 s0, v1
+; GFX1164-NEXT: ; kill: def $sgpr1 killed $sgpr1
 ; GFX1164-NEXT: s_mov_b32 s7, 0x31016000
 ; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -465,6 +466,7 @@
 ; GFX1132-NEXT: .LBB1_2:
 ; GFX1132-NEXT: s_or_b32 exec_lo, exec_lo, s1
 ; GFX1132-NEXT: v_readfirstlane_b32 s2, v1
+; GFX1132-NEXT: ; kill: def $sgpr3 killed $sgpr3
 ; GFX1132-NEXT: s_mov_b32 s7, 0x31016000
 ; GFX1132-NEXT: s_mov_b32 s6, -1
 ; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
@@ -1537,14 +1539,16 @@
 ; GFX1164-NEXT: s_or_b64 exec, exec, s[4:5]
 ; GFX1164-NEXT: v_readfirstlane_b32 s4, v0
 ; GFX1164-NEXT: v_readfirstlane_b32 s5, v1
+; GFX1164-NEXT: ; kill: def $vgpr4 killed $vgpr4
 ; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX1164-NEXT: v_mad_u64_u32 v[0:1], null, s2, v2, s[4:5]
 ; GFX1164-NEXT: s_mov_b32 s2, -1
-; GFX1164-NEXT: v_mad_u64_u32 v[3:4], null, s3, v2, v[1:2]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mad_u64_u32 v[5:6], null, s3, v2, v[3:4]
 ; GFX1164-NEXT: s_mov_b32 s3, 0x31016000
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_mov_b32_e32 v1, v3
+; GFX1164-NEXT: v_mov_b32_e32 v1, v5
 ; GFX1164-NEXT: buffer_store_b64 v[0:1], off, s[0:3], 0
 ; GFX1164-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1164-NEXT: s_endpgm
@@ -1578,14 +1582,16 @@
 ; GFX1132-NEXT: s_or_b32 exec_lo, exec_lo, s4
 ; GFX1132-NEXT: v_readfirstlane_b32 s4, v0
 ; GFX1132-NEXT: v_readfirstlane_b32 s5, v1
+; GFX1132-NEXT: ; kill: def $vgpr4 killed $vgpr4
 ; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX1132-NEXT: v_mad_u64_u32 v[0:1], null, s2, v2, s[4:5]
 ; GFX1132-NEXT: s_mov_b32 s2, -1
-; GFX1132-NEXT: v_mad_u64_u32 v[3:4], null, s3, v2, v[1:2]
+; GFX1132-NEXT: v_mov_b32_e32 v3, v1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: v_mad_u64_u32 v[5:6], null, s3, v2, v[3:4]
 ; GFX1132-NEXT: s_mov_b32 s3, 0x31016000
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_mov_b32_e32 v1, v3
+; GFX1132-NEXT: v_mov_b32_e32 v1, v5
 ; GFX1132-NEXT: buffer_store_b64 v[0:1], off, s[0:3], 0
 ; GFX1132-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1132-NEXT: s_endpgm
@@ -3229,15 +3235,16 @@
 ; GFX1164-NEXT: s_or_b64 exec, exec, s[4:5]
 ; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX1164-NEXT: v_mad_u64_u32 v[3:4], null, s2, v2, 0
+; GFX1164-NEXT: ; kill: def $vgpr5 killed $vgpr5
 ; GFX1164-NEXT: v_readfirstlane_b32 s2, v0
 ; GFX1164-NEXT: v_readfirstlane_b32 s4, v1
 ; GFX1164-NEXT: s_waitcnt_depctr 0xfff
-; GFX1164-NEXT: v_mad_u64_u32 v[5:6], null, s3, v2, v[4:5]
+; GFX1164-NEXT: v_mad_u64_u32 v[6:7], null, s3, v2, v[4:5]
 ; GFX1164-NEXT: v_sub_co_u32 v0, vcc, s2, v3
 ; GFX1164-NEXT: s_mov_b32 s3, 0x31016000
 ; GFX1164-NEXT: s_mov_b32 s2, -1
 ; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1164-NEXT: v_mov_b32_e32 v1, v5
+; GFX1164-NEXT: v_mov_b32_e32 v1, v6
 ; GFX1164-NEXT: v_sub_co_ci_u32_e32 v1, vcc, s4, v1, vcc
 ; GFX1164-NEXT: buffer_store_b64 v[0:1], off, s[0:3], 0
 ; GFX1164-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -3272,15 +3279,16 @@
 ; GFX1132-NEXT: s_or_b32 exec_lo, exec_lo, s4
 ; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX1132-NEXT: v_mad_u64_u32 v[3:4], null, s2, v2, 0
+; GFX1132-NEXT: ; kill: def $vgpr5 killed $vgpr5
 ; GFX1132-NEXT: v_readfirstlane_b32 s2, v0
 ; GFX1132-NEXT: v_readfirstlane_b32 s4, v1
 ; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX1132-NEXT: v_mad_u64_u32 v[5:6], null, s3, v2, v[4:5]
+; GFX1132-NEXT: v_mad_u64_u32 v[6:7], null, s3, v2, v[4:5]
 ; GFX1132-NEXT: v_sub_co_u32 v0, vcc_lo, s2, v3
 ; GFX1132-NEXT: s_mov_b32 s3, 0x31016000
 ; GFX1132-NEXT: s_mov_b32 s2, -1
 ; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1132-NEXT: v_mov_b32_e32 v1, v5
+; GFX1132-NEXT: v_mov_b32_e32 v1, v6
 ; GFX1132-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, s4, v1, vcc_lo
 ; GFX1132-NEXT: buffer_store_b64 v[0:1], off, s[0:3], 0
 ; GFX1132-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_raw_buffer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_raw_buffer.ll
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_raw_buffer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_raw_buffer.ll
@@ -392,6 +392,7 @@
 ; GFX11W64-NEXT: s_or_b64 exec, exec, s[6:7]
 ; GFX11W64-NEXT: s_waitcnt vmcnt(0)
 ; GFX11W64-NEXT: v_readfirstlane_b32 s0, v1
+; GFX11W64-NEXT: ; kill: def $sgpr1 killed $sgpr1
 ; GFX11W64-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11W64-NEXT: s_delay_alu instid0(VALU_DEP_1)
 ; GFX11W64-NEXT: v_mad_u64_u32 v[1:2], null, s8, v0, s[0:1]
@@ -424,6 +425,7 @@
 ; GFX11W32-NEXT: s_or_b32 exec_lo, exec_lo, s5
 ; GFX11W32-NEXT: s_waitcnt vmcnt(0)
 ; GFX11W32-NEXT: v_readfirstlane_b32 s0, v1
+; GFX11W32-NEXT: ; kill: def $sgpr1 killed $sgpr1
 ; GFX11W32-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11W32-NEXT: s_delay_alu instid0(VALU_DEP_1)
 ; GFX11W32-NEXT: v_mad_u64_u32 v[1:2], null, s4, v0, s[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_struct_buffer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_struct_buffer.ll
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_struct_buffer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_struct_buffer.ll
@@ -405,6 +405,7 @@
 ; GFX11W64-NEXT: s_or_b64 exec, exec, s[4:5]
 ; GFX11W64-NEXT: s_waitcnt vmcnt(0)
 ; GFX11W64-NEXT: v_readfirstlane_b32 s0, v1
+; GFX11W64-NEXT: ; kill: def $sgpr1 killed $sgpr1
 ; GFX11W64-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11W64-NEXT: s_delay_alu instid0(VALU_DEP_1)
 ; GFX11W64-NEXT: v_mad_u64_u32 v[1:2], null, s8, v0, s[0:1]
@@ -438,6 +439,7 @@
 ; GFX11W32-NEXT: s_or_b32 exec_lo, exec_lo, s5
 ; GFX11W32-NEXT: s_waitcnt vmcnt(0)
 ; GFX11W32-NEXT: v_readfirstlane_b32 s0, v1
+; GFX11W32-NEXT: ; kill: def $sgpr1 killed $sgpr1
 ; GFX11W32-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11W32-NEXT: s_delay_alu instid0(VALU_DEP_1)
 ; GFX11W32-NEXT: v_mad_u64_u32 v[1:2], null, s4, v0, s[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
--- a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
@@ -400,12 +400,13 @@
 ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v0
+; GFX11-NEXT: ; kill: def $vgpr6 killed $vgpr6
 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v5, v4, v[2:3]
-; GFX11-NEXT: v_ashrrev_i32_e32 v5, 31, v5
-; GFX11-NEXT: v_mov_b32_e32 v3, v1
+; GFX11-NEXT: v_ashrrev_i32_e32 v3, 31, v5
+; GFX11-NEXT: v_mov_b32_e32 v5, v1
 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, v5, v4, v[3:4]
+; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, v3, v4, v[5:6]
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
   %ext0 = sext i32 %arg0 to i64
   %ext1 = zext i32 %arg1 to i64
@@ -489,11 +490,13 @@
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v0
 ; GFX11-NEXT: v_mov_b32_e32 v6, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v2, v3, v[4:5]
-; GFX11-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_and_b32 v5, 1, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, v5, v3, v[4:5]
+; GFX11-NEXT: v_and_b32_e32 v6, 1, v6
+; GFX11-NEXT: ; kill: def $vgpr5 killed $vgpr5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v4, v1
+; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, v6, v3, v[4:5]
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
   %trunc.lhs = and i64 %arg0, 8589934591
   %trunc.rhs = and i64 %arg1, 4294967295
@@ -541,11 +544,13 @@
 ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT: v_mov_b32_e32 v6, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v6, v2, v[4:5]
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_and_b32 v4, 1, v3
+; GFX11-NEXT: v_and_b32_e32 v5, 1, v3
+; GFX11-NEXT: ; kill: def $vgpr4 killed $vgpr4
+; GFX11-NEXT: v_mov_b32_e32 v3, v1
 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, v6, v4, v[3:4]
+; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, v6, v5, v[3:4]
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
   %trunc.lhs = and i64 %arg0, 4294967295
   %trunc.rhs = and i64 %arg1, 8589934591
diff --git a/llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll b/llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll
--- a/llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll
@@ -15,6 +15,14 @@
 ; GFX10: ; %bb.0:
 ; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, v1, v[2:3]
 ; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: mad_i32_vvv:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, v1
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: ; kill: def $vgpr3 killed $vgpr3
+; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v5, v4, v[2:3]
+; GFX11-NEXT: ; return to shader part epilog
   %mul = mul i32 %a, %b
   %add = add i32 %mul, %c
   %cast = bitcast i32 %add to float
@@ -44,6 +52,13 @@
 ; GFX10: ; %bb.0:
 ; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, v1, 42
 ; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: mad_i32_vvc:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v3, v2, 42
+; GFX11-NEXT: ; return to shader part epilog
   %mul = mul i32 %a, %b
   %add = add i32 %mul, 42
   %cast = bitcast i32 %add to float
@@ -62,6 +77,13 @@
 ; GFX10: ; %bb.0:
 ; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, v1, 0x12d687
 ; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: mad_i32_vvi:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v3, v2, 0x12d687
+; GFX11-NEXT: ; return to shader part epilog
   %mul = mul i32 %a, %b
   %add = add i32 %mul, 1234567
   %cast = bitcast i32 %add to float
@@ -106,8 +128,9 @@
 ;
 ; GFX11-LABEL: mad_i32_vcv:
 ; GFX11: ; %bb.0:
-; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v0, 42, v[1:2]
-; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: ; kill: def $vgpr2 killed $vgpr2
+; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, v0, 42, v[1:2]
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
 ; GFX11-NEXT: ; return to shader part epilog
   %mul = mul i32 %a, 42
   %add = add i32 %mul, %c
@@ -147,6 +170,14 @@
 ; GFX10: ; %bb.0:
 ; GFX10-NEXT: v_mad_u64_u32 v[0:1], null, v0, v1, s[0:1]
 ; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: mad_i32_vvs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-NEXT: v_mov_b32_e32 v3, v0
+; GFX11-NEXT: ; kill: def $sgpr1 killed $sgpr1
+; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v3, v2, s[0:1]
+; GFX11-NEXT: ; return to shader part epilog
   %mul = mul i32 %a, %b
   %add = add i32 %mul, %c
   %cast = bitcast i32 %add to float
@@ -166,8 +197,9 @@
 ;
 ; GFX11-LABEL: mad_i32_vsv:
 ; GFX11: ; %bb.0:
-; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v0, s0, v[1:2]
-; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: ; kill: def $vgpr2 killed $vgpr2
+; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, v0, s0, v[1:2]
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
 ; GFX11-NEXT: ; return to shader part epilog
   %mul = mul i32 %a, %b
   %add = add i32 %mul, %c
@@ -188,8 +220,9 @@
 ;
 ; GFX11-LABEL: mad_i32_svv:
 ; GFX11: ; %bb.0:
-; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, s0, v0, v[1:2]
-; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: ; kill: def $vgpr2 killed $vgpr2
+; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, s0, v0, v[1:2]
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
 ; GFX11-NEXT: ; return to shader part epilog
   %mul = mul i32 %a, %b
   %add = add i32 %mul, %c
@@ -214,6 +247,7 @@
 ; GFX11: ; %bb.0:
 ; GFX11-NEXT: v_mov_b32_e32 v2, v0
 ; GFX11-NEXT: s_mov_b32 s2, s1
+; GFX11-NEXT: ; kill: def $sgpr3 killed $sgpr3
 ; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v2, s0, s[2:3]
 ; GFX11-NEXT: ; return to shader part epilog
   %mul = mul i32 %a, %b
@@ -239,6 +273,7 @@
 ; GFX11: ; %bb.0:
 ; GFX11-NEXT: v_mov_b32_e32 v2, v0
 ; GFX11-NEXT: s_mov_b32 s2, s1
+; GFX11-NEXT: ; kill: def $sgpr3 killed $sgpr3
 ; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, s0, v2, s[2:3]
 ; GFX11-NEXT: ; return to shader part epilog
   %mul = mul i32 %a, %b
@@ -261,8 +296,9 @@
 ;
 ; GFX11-LABEL: mad_i32_ssv:
 ; GFX11: ; %bb.0:
-; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, s0, s1, v[0:1]
-; GFX11-NEXT: v_mov_b32_e32 v0, v1
+; GFX11-NEXT: ; kill: def $vgpr1 killed $vgpr1
+; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, s0, s1, v[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
 ; GFX11-NEXT: ; return to shader part epilog
   %mul = mul i32 %a, %b
   %add = add i32 %mul, %c
diff --git a/llvm/test/CodeGen/Thumb2/mve-laneinterleaving-cost.ll b/llvm/test/CodeGen/Thumb2/mve-laneinterleaving-cost.ll
--- a/llvm/test/CodeGen/Thumb2/mve-laneinterleaving-cost.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-laneinterleaving-cost.ll
@@ -370,21 +370,25 @@
 ; CHECK: @ %bb.0: @ %entry
 ; CHECK-NEXT: .save {r4, r5, r6, r7, lr}
 ; CHECK-NEXT: push {r4, r5, r6, r7, lr}
-; CHECK-NEXT: vldrw.u32 q1, [r0]
-; CHECK-NEXT: vldrw.u32 q0, [r1]
+; CHECK-NEXT: vldrw.u32 q0, [r0]
+; CHECK-NEXT: vldrw.u32 q1, [r1]
 ; CHECK-NEXT: ldr.w lr, [sp, #20]
-; CHECK-NEXT: vmov.f32 s10, s1
-; CHECK-NEXT: vmov.f32 s14, s5
-; CHECK-NEXT: vmov r5, s4
-; CHECK-NEXT: vmov.f32 s4, s6
-; CHECK-NEXT: vmov.f32 s6, s7
+; CHECK-NEXT: vmov.f32 s10, s5
+; CHECK-NEXT: @ kill: def $s5 killed $s5
+; CHECK-NEXT: vmov.f32 s14, s1
+; CHECK-NEXT: @ kill: def $s1 killed $s1
+; CHECK-NEXT: vmov r5, s0
+; CHECK-NEXT: vmov.f32 s0, s2
+; CHECK-NEXT: vmov.f32 s2, s3
+; CHECK-NEXT: @ kill: def $s3 killed $s3
 ; CHECK-NEXT: vmov r0, s10
 ; CHECK-NEXT: vmov r1, s14
 ; CHECK-NEXT: smull r12, r3, r1, r0
-; CHECK-NEXT: vmov r0, s0
-; CHECK-NEXT: vmov.f32 s0, s2
-; CHECK-NEXT: vmov.f32 s2, s3
-; CHECK-NEXT: vmullb.s32 q2, q1, q0
+; CHECK-NEXT: vmov r0, s4
+; CHECK-NEXT: vmov.f32 s4, s6
+; CHECK-NEXT: vmov.f32 s6, s7
+; CHECK-NEXT: @ kill: def $s7 killed $s7
+; CHECK-NEXT: vmullb.s32 q2, q0, q1
 ; CHECK-NEXT: asrl r12, r3, r2
 ; CHECK-NEXT: vmov r6, r1, d4
 ; CHECK-NEXT: vmov r4, r7, d5
diff --git a/llvm/test/CodeGen/Thumb2/mve-satmul-loops.ll b/llvm/test/CodeGen/Thumb2/mve-satmul-loops.ll
--- a/llvm/test/CodeGen/Thumb2/mve-satmul-loops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-satmul-loops.ll
@@ -225,88 +225,92 @@
 ; CHECK-NEXT: vldrw.u32 q1, [r4]
 ; CHECK-NEXT: .LBB1_4: @ %vector.body
 ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vldrw.u32 q4, [r5], #16
-; CHECK-NEXT: vldrw.u32 q3, [r0], #16
+; CHECK-NEXT: vldrw.u32 q3, [r5], #16
+; CHECK-NEXT: vldrw.u32 q2, [r0], #16
+; CHECK-NEXT: @ kill: def $s17 killed $s17
+; CHECK-NEXT: @ kill: def $s19 killed $s19
+; CHECK-NEXT: @ kill: def $s21 killed $s21
+; CHECK-NEXT: @ kill: def $s23 killed $s23
 ; CHECK-NEXT: str r2, [sp, #12] @ 4-byte Spill
 ; CHECK-NEXT: mov.w r2, #-1
-; CHECK-NEXT: vmov.f32 s8, s14
+; CHECK-NEXT: vmov.f32 s16, s10
 ; CHECK-NEXT: str r5, [sp, #8] @ 4-byte Spill
-; CHECK-NEXT: vmov.f32 s20, s18
+; CHECK-NEXT: vmov.f32 s20, s14
 ; CHECK-NEXT: mov.w r8, #0
-; CHECK-NEXT: vmov.f32 s10, s15
-; CHECK-NEXT: vmov.f32 s22, s19
-; CHECK-NEXT: vmullb.s32 q6, q5, q2
-; CHECK-NEXT: vmov.f32 s18, s17
+; CHECK-NEXT: vmov.f32 s18, s11
+; CHECK-NEXT: vmov.f32 s22, s15
+; CHECK-NEXT: vmullb.s32 q6, q5, q4
+; CHECK-NEXT: vmov.f32 s14, s13
 ; CHECK-NEXT: vmov r4, r7, d12
 ; CHECK-NEXT: asrl r4, r7, #31
-; CHECK-NEXT: vmov.f32 s14, s13
+; CHECK-NEXT: vmov.f32 s10, s9
 ; CHECK-NEXT: rsbs.w r5, r4, #-2147483648
 ; CHECK-NEXT: sbcs.w r5, r2, r7
 ; CHECK-NEXT: csetm r5, lt
 ; CHECK-NEXT: bfi r8, r5, #0, #8
 ; CHECK-NEXT: vmov r10, r5, d13
 ; CHECK-NEXT: asrl r10, r5, #31
-; CHECK-NEXT: vmov r6, s18
+; CHECK-NEXT: vmov r6, s14
 ; CHECK-NEXT: rsbs.w r3, r10, #-2147483648
-; CHECK-NEXT: vmov q2[2], q2[0], r4, r10
+; CHECK-NEXT: vmov q4[2], q4[0], r4, r10
 ; CHECK-NEXT: sbcs.w r3, r2, r5
-; CHECK-NEXT: vmov q2[3], q2[1], r7, r5
+; CHECK-NEXT: vmov q4[3], q4[1], r7, r5
 ; CHECK-NEXT: csetm r3, lt
 ; CHECK-NEXT: bfi r8, r3, #8, #8
 ; CHECK-NEXT: vmsr p0, r8
 ; CHECK-NEXT: mvn r8, #-2147483648
-; CHECK-NEXT: vpsel q2, q2, q0
-; CHECK-NEXT: vmov r3, r4, d4
+; CHECK-NEXT: vpsel q4, q4, q0
+; CHECK-NEXT: vmov r3, r4, d8
 ; CHECK-NEXT: subs.w r3, r3, r8
 ; CHECK-NEXT: sbcs r3, r4, #0
 ; CHECK-NEXT: mov.w r4, #0
 ; CHECK-NEXT: csetm r3, lt
 ; CHECK-NEXT: bfi r4, r3, #0, #8
-; CHECK-NEXT: vmov r3, r5, d5
+; CHECK-NEXT: vmov r3, r5, d9
 ; CHECK-NEXT: subs.w r3, r3, r8
 ; CHECK-NEXT: sbcs r3, r5, #0
 ; CHECK-NEXT: mov.w r5, #0
 ; CHECK-NEXT: csetm r3, lt
 ; CHECK-NEXT: bfi r4, r3, #8, #8
-; CHECK-NEXT: vmov r3, s12
+; CHECK-NEXT: vmov r3, s8
 ; CHECK-NEXT: vmsr p0, r4
-; CHECK-NEXT: vmov r4, s16
-; CHECK-NEXT: vpsel q2, q2, q1
+; CHECK-NEXT: vmov r4, s12
+; CHECK-NEXT: vpsel q4, q4, q1
 ; CHECK-NEXT: smull r4, r7, r4, r3
 ; CHECK-NEXT: asrl r4, r7, #31
 ; CHECK-NEXT: rsbs.w r3, r4, #-2147483648
 ; CHECK-NEXT: sbcs.w r3, r2, r7
 ; CHECK-NEXT: csetm r3, lt
 ; CHECK-NEXT: bfi r5, r3, #0, #8
-; CHECK-NEXT: vmov r3, s14
+; CHECK-NEXT: vmov r3, s10
 ; CHECK-NEXT: smull r6, r3, r6, r3
 ; CHECK-NEXT: asrl r6, r3, #31
 ; CHECK-NEXT: rsbs.w r1, r6, #-2147483648
-; CHECK-NEXT: vmov q3[2], q3[0], r4, r6
+; CHECK-NEXT: vmov q2[2], q2[0], r4, r6
 ; CHECK-NEXT: sbcs.w r1, r2, r3
-; CHECK-NEXT: vmov q3[3], q3[1], r7, r3
+; CHECK-NEXT: vmov q2[3], q2[1], r7, r3
 ; CHECK-NEXT: csetm r1, lt
 ; CHECK-NEXT: bfi r5, r1, #8, #8
 ; CHECK-NEXT: vmsr p0, r5
 ; CHECK-NEXT: ldrd r5, r2, [sp, #8] @ 8-byte Folded Reload
-; CHECK-NEXT: vpsel q3, q3, q0
-; CHECK-NEXT: vmov r1, r3, d6
+; CHECK-NEXT: vpsel q2, q2, q0
+; CHECK-NEXT: vmov r1, r3, d4
 ; CHECK-NEXT: subs.w r1, r1, r8
 ; CHECK-NEXT: sbcs r1, r3, #0
 ; CHECK-NEXT: mov.w r3, #0
 ; CHECK-NEXT: csetm r1, lt
 ; CHECK-NEXT: bfi r3, r1, #0, #8
-; CHECK-NEXT: vmov r1, r4, d7
+; CHECK-NEXT: vmov r1, r4, d5
 ; CHECK-NEXT: subs.w r1, r1, r8
 ; CHECK-NEXT: sbcs r1, r4, #0
 ; CHECK-NEXT: csetm r1, lt
 ; CHECK-NEXT: bfi r3, r1, #8, #8
 ; CHECK-NEXT: vmsr p0, r3
-; CHECK-NEXT: vpsel q3, q3, q1
-; CHECK-NEXT: vmov.f32 s13, s14
-; CHECK-NEXT: vmov.f32 s14, s8
-; CHECK-NEXT: vmov.f32 s15, s10
-; CHECK-NEXT: vstrb.8 q3, [r2], #16
+; CHECK-NEXT: vpsel q2, q2, q1
+; CHECK-NEXT: vmov.f32 s9, s10
+; CHECK-NEXT: vmov.f32 s10, s16
+; CHECK-NEXT: vmov.f32 s11, s18
+; CHECK-NEXT: vstrb.8 q2, [r2], #16
 ; CHECK-NEXT: le lr, .LBB1_4
 ; CHECK-NEXT: @ %bb.5: @ %middle.block
 ; CHECK-NEXT: ldrd r1, r3, [sp] @ 8-byte Folded Reload
@@ -462,20 +466,24 @@
 ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT: vldrw.u32 q0, [sp] @ 16-byte Reload
 ; CHECK-NEXT: vdup.32 q4, r9
+; CHECK-NEXT: @ kill: def $s25 killed $s25
+; CHECK-NEXT: @ kill: def $s27 killed $s27
+; CHECK-NEXT: @ kill: def $s29 killed $s29
+; CHECK-NEXT: @ kill: def $s31 killed $s31
 ; CHECK-NEXT: movs r4, #0
 ; CHECK-NEXT: add.w r9, r9, #4
 ; CHECK-NEXT: vorr q4, q4, q0
 ; CHECK-NEXT: vcmp.u32 cs, q1, q4
 ; CHECK-NEXT: vstr p0, [sp, #20] @ 4-byte Spill
 ; CHECK-NEXT: vpstt
-; CHECK-NEXT: vldrwt.u32 q5, [r0], #16
-; CHECK-NEXT: vldrwt.u32 q6, [r1], #16
-; CHECK-NEXT: vmov.f32 s16, s22
-; CHECK-NEXT: vmov.f32 s18, s23
-; CHECK-NEXT: vmov.f32 s28, s26
-; CHECK-NEXT: vmov.f32 s30, s27
-; CHECK-NEXT: vmullb.s32 q0, q7, q4
-; CHECK-NEXT: vmov.f32 s22, s25
+; CHECK-NEXT: vldrwt.u32 q4, [r0], #16
+; CHECK-NEXT: vldrwt.u32 q5, [r1], #16
+; CHECK-NEXT: vmov.f32 s24, s18
+; CHECK-NEXT: vmov.f32 s26, s19
+; CHECK-NEXT: vmov.f32 s28, s22
+; CHECK-NEXT: vmov.f32 s30, s23
+; CHECK-NEXT: vmullb.s32 q0, q7, q6
+; CHECK-NEXT: vmov.f32 s18, s21
 ; CHECK-NEXT: vmov r10, r5, d0
 ; CHECK-NEXT: asrl r10, r5, #31
 ; CHECK-NEXT: rsbs.w r7, r10, #-2147483648
@@ -489,7 +497,7 @@
 ; CHECK-NEXT: sbcs.w r3, r12, r7
 ; CHECK-NEXT: vmov q0[3], q0[1], r5, r7
 ; CHECK-NEXT: csetm r3, lt
-; CHECK-NEXT: vmov r7, s22
+; CHECK-NEXT: vmov r7, s18
 ; CHECK-NEXT: bfi r4, r3, #8, #8
 ; CHECK-NEXT: vmsr p0, r4
 ; CHECK-NEXT: vpsel q0, q0, q2
@@ -504,11 +512,11 @@
 ; CHECK-NEXT: sbcs r3, r5, #0
 ; CHECK-NEXT: csetm r3, lt
 ; CHECK-NEXT: bfi r4, r3, #8, #8
-; CHECK-NEXT: vmov r3, s20
+; CHECK-NEXT: vmov r3, s16
 ; CHECK-NEXT: vmsr p0, r4
-; CHECK-NEXT: vmov r4, s24
-; CHECK-NEXT: vpsel q4, q0, q3
-; CHECK-NEXT: vmov.f32 s2, s21
+; CHECK-NEXT: vmov r4, s20
+; CHECK-NEXT: vpsel q6, q0, q3
+; CHECK-NEXT: vmov.f32 s2, s17
 ; CHECK-NEXT: smull r10, r5, r4, r3
 ; CHECK-NEXT: movs r4, #0
 ; CHECK-NEXT: asrl r10, r5, #31
@@ -542,8 +550,8 @@
 ; CHECK-NEXT: vpsel q0, q0, q3
 ; CHECK-NEXT: vldr p0, [sp, #20] @ 4-byte Reload
 ; CHECK-NEXT: vmov.f32 s1, s2
-; CHECK-NEXT: vmov.f32 s2, s16
-; CHECK-NEXT: vmov.f32 s3, s18
+; CHECK-NEXT: vmov.f32 s2, s24
+; CHECK-NEXT: vmov.f32 s3, s26
 ; CHECK-NEXT: vpst
 ; CHECK-NEXT: vstrwt.32 q0, [r2], #16
 ; CHECK-NEXT: le lr, .LBB2_2
@@ -790,29 +798,39 @@
 ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT: vldrw.u32 q1, [r0], #16
 ; CHECK-NEXT: vldrw.u32 q3, [r1], #16
+; CHECK-NEXT: @ kill: def $s9 killed $s9
+; CHECK-NEXT: @ kill: def $s11 killed $s11
+; CHECK-NEXT: @ kill: def $s17 killed $s17
+; CHECK-NEXT: @ kill: def $s19 killed $s19
 ; CHECK-NEXT: vmov.f32 s8, s6
-; CHECK-NEXT: vmov.f32 s16, s14
 ; CHECK-NEXT: vmov.f32 s10, s7
+; CHECK-NEXT: @ kill: def $s7 killed $s7
+; CHECK-NEXT: vmov.f32 s16, s14
 ; CHECK-NEXT: vmov.f32 s18, s15
 ; CHECK-NEXT: vmullb.u32 q5, q4, q2
-; CHECK-NEXT: vmov.f32 s6, s5
+; CHECK-NEXT: vmov.f32 s16, s4
 ; CHECK-NEXT: vmov r10, r5, d10
+; CHECK-NEXT: @ kill: def $s17 killed $s17
+; CHECK-NEXT: @ kill: def $s19 killed $s19
 ; CHECK-NEXT: lsrl r10, r5, #31
-; CHECK-NEXT: vmov.f32 s14, s13
+; CHECK-NEXT: vmov.f32 s18, s5
 ; CHECK-NEXT: subs.w r6, r10, #-1
-; CHECK-NEXT: vmullb.u32 q4, q3, q1
+; CHECK-NEXT: vmov.f32 s4, s12
 ; CHECK-NEXT: sbcs r5, r5, #0
 ; CHECK-NEXT: mov.w r6, #0
 ; CHECK-NEXT: csetm r5, lo
+; CHECK-NEXT: vmov.f32 s6, s13
 ; CHECK-NEXT: bfi r6, r5, #0, #8
 ; CHECK-NEXT: vmov r4, r5, d11
 ; CHECK-NEXT: lsrl r4, r5, #31
+; CHECK-NEXT: @ kill: def $s5 killed $s5
+; CHECK-NEXT: vmullb.u32 q3, q1, q4
 ; CHECK-NEXT: subs.w r7, r4, #-1
 ; CHECK-NEXT: vmov q2[2], q2[0], r10, r4
 ; CHECK-NEXT: sbcs r5, r5, #0
 ; CHECK-NEXT: csetm r5, lo
 ; CHECK-NEXT: bfi r6, r5, #8, #8
-; CHECK-NEXT: vmov r10, r5, d8
+; CHECK-NEXT: vmov r10, r5, d6
 ; CHECK-NEXT: lsrl r10, r5, #31
 ; CHECK-NEXT: vmsr p0, r6
 ; CHECK-NEXT: subs.w r6, r10, #-1
@@ -821,7 +839,7 @@
 ; CHECK-NEXT: sbcs r5, r5, #0
 ; CHECK-NEXT: mov.w r6, #0
 ; CHECK-NEXT: csetm r5, lo
 ; CHECK-NEXT: bfi r6, r5, #0, #8
-; CHECK-NEXT: vmov r4, r5, d9
+; CHECK-NEXT: vmov r4, r5, d7
 ; CHECK-NEXT: lsrl r4, r5, #31
 ; CHECK-NEXT: subs.w r7, r4, #-1
 ; CHECK-NEXT: vmov q1[2], q1[0], r10, r4
diff --git a/llvm/test/CodeGen/Thumb2/mve-vmull-splat.ll b/llvm/test/CodeGen/Thumb2/mve-vmull-splat.ll
--- a/llvm/test/CodeGen/Thumb2/mve-vmull-splat.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vmull-splat.ll
@@ -190,10 +190,16 @@
 define arm_aapcs_vfpcc <4 x i64> @sext32_0213_0ext(<8 x i32> %src1, i32 %src2) {
 ; CHECK-LABEL: sext32_0213_0ext:
 ; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.f32 s4, s0
 ; CHECK-NEXT: vmov q3[2], q3[0], r0, r0
-; CHECK-NEXT: vmullb.s32 q2, q0, q3
+; CHECK-NEXT: vmov.f32 s6, s2
+; CHECK-NEXT: @ kill: def $s5 killed $s5
+; CHECK-NEXT: @ kill: def $s7 killed $s7
 ; CHECK-NEXT: vmov.f32 s0, s1
+; CHECK-NEXT: vmullb.s32 q2, q1, q3
 ; CHECK-NEXT: vmov.f32 s2, s3
+; CHECK-NEXT: @ kill: def $s1 killed $s1
+; CHECK-NEXT: @ kill: def $s3 killed $s3
 ; CHECK-NEXT: vmullb.s32 q1, q0, q3
 ; CHECK-NEXT: vmov q0, q2
 ; CHECK-NEXT: bx lr
@@ -210,10 +216,16 @@
 define arm_aapcs_vfpcc <4 x i64> @sext32_0ext_0213(<8 x i32> %src1, i32 %src2) {
 ; CHECK-LABEL: sext32_0ext_0213:
 ; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.f32 s4, s0
 ; CHECK-NEXT: vmov q3[2], q3[0], r0, r0
-; CHECK-NEXT: vmullb.s32 q2, q3, q0
+; CHECK-NEXT: vmov.f32 s6, s2
+; CHECK-NEXT: @ kill: def $s5 killed $s5
+; CHECK-NEXT: @ kill: def $s7 killed $s7
 ; CHECK-NEXT: vmov.f32 s0, s1
+; CHECK-NEXT: vmullb.s32 q2, q3, q1
 ; CHECK-NEXT: vmov.f32 s2, s3
+; CHECK-NEXT: @ kill: def $s1 killed $s1
+; CHECK-NEXT: @ kill: def $s3 killed $s3
 ; CHECK-NEXT: vmullb.s32 q1, q3, q0
 ; CHECK-NEXT: vmov q0, q2
 ; CHECK-NEXT: bx lr
@@ -466,10 +478,16 @@
 define arm_aapcs_vfpcc <4 x i64> @zext32_0213_0ext(<8 x i32> %src1, i32 %src2) {
 ; CHECK-LABEL: zext32_0213_0ext:
 ; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.f32 s4, s0
 ; CHECK-NEXT: vmov q3[2], q3[0], r0, r0
-; CHECK-NEXT: vmullb.u32 q2, q0, q3
+; CHECK-NEXT: vmov.f32 s6, s2
+; CHECK-NEXT: @ kill: def $s5 killed $s5
+; CHECK-NEXT: @ kill: def $s7 killed $s7
 ; CHECK-NEXT: vmov.f32 s0, s1
+; CHECK-NEXT: vmullb.u32 q2, q1, q3
 ; CHECK-NEXT: vmov.f32 s2, s3
+; CHECK-NEXT: @ kill: def $s1 killed $s1
+; CHECK-NEXT: @ kill: def $s3 killed $s3
 ; CHECK-NEXT: vmullb.u32 q1, q0, q3
 ; CHECK-NEXT: vmov q0, q2
 ; CHECK-NEXT: bx lr
@@ -486,10 +504,16 @@
 define arm_aapcs_vfpcc <4 x i64> @zext32_0ext_0213(<8 x i32> %src1, i32 %src2) {
 ; CHECK-LABEL: zext32_0ext_0213:
 ; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.f32 s4, s0
 ; CHECK-NEXT: vmov q3[2], q3[0], r0, r0
-; CHECK-NEXT: vmullb.u32 q2, q3, q0
+; CHECK-NEXT: vmov.f32 s6, s2
+; CHECK-NEXT: @ kill: def $s5 killed $s5
+; CHECK-NEXT: @ kill: def $s7 killed $s7
 ; CHECK-NEXT: vmov.f32 s0, s1
+; CHECK-NEXT: vmullb.u32 q2, q3, q1
 ; CHECK-NEXT: vmov.f32 s2, s3
+; CHECK-NEXT: @ kill: def $s1 killed $s1
+; CHECK-NEXT: @ kill: def $s3 killed $s3
 ; CHECK-NEXT: vmullb.u32 q1, q3, q0
 ; CHECK-NEXT: vmov q0, q2
 ; CHECK-NEXT: bx lr