Index: llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -2013,7 +2013,10 @@
   unsigned NumLanes = InsRegs.size();
   if (!NumLanes) {
     NumLanes = 1;
-    InsRegs.push_back(MI.getOperand(2).getReg());
+    Register InReg = MI.getOperand(2).getReg();
+    if (DstBank == AMDGPU::VGPRRegBank && InsBank == AMDGPU::SGPRRegBank)
+      InReg = B.buildCopy(MRI.getType(InReg), InReg).getReg(0);
+    InsRegs.push_back(InReg);
   } else {
     EltTy = MRI.getType(InsRegs[0]);
   }
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i8.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i8.ll
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i8.ll
@@ -6,18 +6,18 @@
 define amdgpu_ps void @insertelement_s_v2i8_s_s(<2 x i8> addrspace(4)* inreg %ptr, i8 inreg %val, i32 inreg %idx) {
 ; GFX9-LABEL: insertelement_s_v2i8_s_s:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    global_load_ushort v1, v1, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    global_load_ushort v0, v0, s[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v1, s4
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s5, 0
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_lshrrev_b32_e32 v2, 8, v1
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v0, vcc
+; GFX9-NEXT:    v_lshrrev_b32_e32 v2, 8, v0
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s5, 1
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
-; GFX9-NEXT:    v_and_b32_e32 v0, 0xff, v0
-; GFX9-NEXT:    v_lshlrev_b16_e32 v0, 8, v0
-; GFX9-NEXT:    v_or_b32_sdwa v2, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GFX9-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT:    v_or_b32_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX9-NEXT:    global_store_short v[0:1], v2, off
@@ -28,13 +28,13 @@
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX8-NEXT:    flat_load_ushort v0, v[0:1]
-; GFX8-NEXT:    v_mov_b32_e32 v2, s4
+; GFX8-NEXT:    v_mov_b32_e32 v1, s4
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 vcc, s5, 0
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_lshrrev_b32_e32 v1, 8, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 8, v0
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 vcc, s5, 1
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GFX8-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
 ; GFX8-NEXT:    v_or_b32_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -49,20 +49,20 @@
 ; GFX7-NEXT:    s_mov_b32 s1, s3
 ; GFX7-NEXT:    s_mov_b32 s2, -1
 ; GFX7-NEXT:    s_mov_b32 s3, 0xf000
-; GFX7-NEXT:    buffer_load_ushort v1, off, s[0:3], 0
-; GFX7-NEXT:    v_mov_b32_e32 v0, s4
+; GFX7-NEXT:    buffer_load_ushort v0, off, s[0:3], 0
+; GFX7-NEXT:    v_mov_b32_e32 v2, s4
 ; GFX7-NEXT:    v_cmp_eq_u32_e64 vcc, s5, 0
-; GFX7-NEXT:    v_mov_b32_e32 v2, 0xff
+; GFX7-NEXT:    v_mov_b32_e32 v1, 0xff
 ; GFX7-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_lshrrev_b32_e32 v3, 8, v1
-; GFX7-NEXT:    v_cndmask_b32_e32 v1, v1, v0, vcc
+; GFX7-NEXT:    v_lshrrev_b32_e32 v3, 8, v0
+; GFX7-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; GFX7-NEXT:    v_cmp_eq_u32_e64 vcc, s5, 1
-; GFX7-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
-; GFX7-NEXT:    v_and_b32_e32 v0, v0, v2
-; GFX7-NEXT:    v_and_b32_e32 v1, v1, v2
-; GFX7-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
-; GFX7-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX7-NEXT:    v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX7-NEXT:    v_and_b32_e32 v0, v0, v1
+; GFX7-NEXT:    v_and_b32_e32 v1, v2, v1
+; GFX7-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX7-NEXT:    buffer_store_short v0, off, s[0:3], 0
 ; GFX7-NEXT:    s_endpgm
   %vec = load <2 x i8>, <2 x i8> addrspace(4)* %ptr
@@ -75,13 +75,13 @@
 ; GFX9-LABEL: insertelement_v_v2i8_s_s:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    global_load_ushort v0, v[0:1], off
-; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    v_mov_b32_e32 v1, s2
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_lshrrev_b32_e32 v1, 8, v0
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT:    v_lshrrev_b32_e32 v2, 8, v0
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 1
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GFX9-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
 ; GFX9-NEXT:    v_or_b32_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -93,13 +93,13 @@
 ; GFX8-LABEL: insertelement_v_v2i8_s_s:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    flat_load_ushort v0, v[0:1]
-; GFX8-NEXT:    v_mov_b32_e32 v2, s2
+; GFX8-NEXT:    v_mov_b32_e32 v1, s2
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_lshrrev_b32_e32 v1, 8, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 8, v0
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 1
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GFX8-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
 ; GFX8-NEXT:    v_or_b32_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -203,18 +203,18 @@
 define amdgpu_ps void @insertelement_s_v2i8_s_v(<2 x i8> addrspace(4)* inreg %ptr, i8 inreg %val, i32 %idx) {
 ; GFX9-LABEL: insertelement_s_v2i8_s_v:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    global_load_ushort v2, v2, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v1, s4
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    global_load_ushort v1, v1, s[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v2, s4
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_lshrrev_b32_e32 v3, 8, v2
-; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v1, vcc
+; GFX9-NEXT:    v_lshrrev_b32_e32 v3, 8, v1
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xff, v0
 ; GFX9-NEXT:    v_lshlrev_b16_e32 v0, 8, v0
-; GFX9-NEXT:    v_or_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT:    v_or_b32_sdwa v2, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX9-NEXT:    global_store_short v[0:1], v2, off
@@ -225,13 +225,13 @@
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s2
 ; GFX8-NEXT:    v_mov_b32_e32 v2, s3
 ; GFX8-NEXT:    flat_load_ushort v1, v[1:2]
-; GFX8-NEXT:    v_mov_b32_e32 v3, s4
+; GFX8-NEXT:    v_mov_b32_e32 v2, s4
 ; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 8, v1
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX8-NEXT:    v_lshrrev_b32_e32 v3, 8, v1
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
 ; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xff, v0
 ; GFX8-NEXT:    v_lshlrev_b16_e32 v0, 8, v0
 ; GFX8-NEXT:    v_or_b32_sdwa v2, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -246,18 +246,18 @@
 ; GFX7-NEXT:    s_mov_b32 s1, s3
 ; GFX7-NEXT:    s_mov_b32 s2, -1
 ; GFX7-NEXT:    s_mov_b32 s3, 0xf000
-; GFX7-NEXT:    buffer_load_ushort v2, off, s[0:3], 0
-; GFX7-NEXT:    v_mov_b32_e32 v1, s4
+; GFX7-NEXT:    buffer_load_ushort v1, off, s[0:3], 0
+; GFX7-NEXT:    v_mov_b32_e32 v3, s4
 ; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX7-NEXT:    v_mov_b32_e32 v3, 0xff
+; GFX7-NEXT:    v_mov_b32_e32 v2, 0xff
 ; GFX7-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_lshrrev_b32_e32 v4, 8, v2
-; GFX7-NEXT:    v_cndmask_b32_e32 v2, v2, v1, vcc
+; GFX7-NEXT:    v_lshrrev_b32_e32 v4, 8, v1
+; GFX7-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX7-NEXT:    v_cndmask_b32_e32 v0, v4, v1, vcc
-; GFX7-NEXT:    v_and_b32_e32 v0, v0, v3
-; GFX7-NEXT:    v_and_b32_e32 v1, v2, v3
+; GFX7-NEXT:    v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX7-NEXT:    v_and_b32_e32 v0, v0, v2
+; GFX7-NEXT:    v_and_b32_e32 v1, v1, v2
 ; GFX7-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
 ; GFX7-NEXT:    v_or_b32_e32 v0, v1, v0
 ; GFX7-NEXT:    buffer_store_short v0, off, s[0:3], 0
@@ -337,13 +337,13 @@
 ; GFX9-LABEL: insertelement_v_v2i8_s_v:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    global_load_ushort v0, v[0:1], off
-; GFX9-NEXT:    v_mov_b32_e32 v3, s2
+; GFX9-NEXT:    v_mov_b32_e32 v1, s2
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_lshrrev_b32_e32 v1, 8, v0
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
+; GFX9-NEXT:    v_lshrrev_b32_e32 v3, 8, v0
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GFX9-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
 ; GFX9-NEXT:    v_or_b32_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -355,13 +355,13 @@
 ; GFX8-LABEL: insertelement_v_v2i8_s_v:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    flat_load_ushort v0, v[0:1]
-; GFX8-NEXT:    v_mov_b32_e32 v3, s2
+; GFX8-NEXT:    v_mov_b32_e32 v1, s2
 ; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_lshrrev_b32_e32 v1, 8, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
+; GFX8-NEXT:    v_lshrrev_b32_e32 v3, 8, v0
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GFX8-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
 ; GFX8-NEXT:    v_or_b32_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -372,14 +372,14 @@
 ;
 ; GFX7-LABEL: insertelement_v_v2i8_s_v:
 ; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_mov_b32 s6, 0
+; GFX7-NEXT:    s_mov_b32 s7, 0xf000
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:    buffer_load_ushort v0, v[0:1], s[4:7], 0 addr64
 ; GFX7-NEXT:    v_mov_b32_e32 v3, s2
-; GFX7-NEXT:    s_mov_b32 s2, 0
-; GFX7-NEXT:    s_mov_b32 s3, 0xf000
-; GFX7-NEXT:    s_mov_b64 s[0:1], 0
-; GFX7-NEXT:    buffer_load_ushort v0, v[0:1], s[0:3], 0 addr64
 ; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
 ; GFX7-NEXT:    v_mov_b32_e32 v1, 0xff
-; GFX7-NEXT:    s_mov_b32 s2, -1
+; GFX7-NEXT:    s_mov_b32 s6, -1
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
 ; GFX7-NEXT:    v_lshrrev_b32_e32 v4, 8, v0
 ; GFX7-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
@@ -389,7 +389,7 @@
 ; GFX7-NEXT:    v_and_b32_e32 v1, v2, v1
 ; GFX7-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
 ; GFX7-NEXT:    v_or_b32_e32 v0, v0, v1
-; GFX7-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; GFX7-NEXT:    buffer_store_short v0, off, s[4:7], 0
 ; GFX7-NEXT:    s_endpgm
   %vec = load <2 x i8>, <2 x i8> addrspace(1)* %ptr
   %insert = insertelement <2 x i8> %vec, i8 %val, i32 %idx
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll
@@ -192,8 +192,8 @@
 ; GPRIDX-NEXT:    s_mov_b32 s4, s6
 ; GPRIDX-NEXT:    s_mov_b32 s6, s8
 ; GPRIDX-NEXT:    v_mov_b32_e32 v15, s7
-; GPRIDX-NEXT:    v_mov_b32_e32 v7, s10
 ; GPRIDX-NEXT:    v_mov_b32_e32 v8, s0
+; GPRIDX-NEXT:    v_mov_b32_e32 v7, s10
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v8, v8, v7, vcc
 ; GPRIDX-NEXT:    v_mov_b32_e32 v9, s1
@@ -230,31 +230,30 @@
 ; MOVREL-NEXT:    s_mov_b32 s4, s6
 ; MOVREL-NEXT:    s_mov_b32 s6, s8
 ; MOVREL-NEXT:    v_mov_b32_e32 v15, s7
-; MOVREL-NEXT:    v_mov_b32_e32 v7, s10
 ; MOVREL-NEXT:    v_mov_b32_e32 v8, s0
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
 ; MOVREL-NEXT:    v_mov_b32_e32 v9, s1
 ; MOVREL-NEXT:    v_mov_b32_e32 v10, s2
 ; MOVREL-NEXT:    v_mov_b32_e32 v11, s3
 ; MOVREL-NEXT:    v_mov_b32_e32 v12, s4
-; MOVREL-NEXT:    v_cndmask_b32_e32 v8, v8, v7, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e64 v8, v8, s10, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
 ; MOVREL-NEXT:    v_mov_b32_e32 v13, s5
 ; MOVREL-NEXT:    v_mov_b32_e32 v14, s6
-; MOVREL-NEXT:    v_cndmask_b32_e32 v1, v9, v7, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e64 v1, v9, s10, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
-; MOVREL-NEXT:    v_cndmask_b32_e32 v2, v10, v7, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e64 v2, v10, s10, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
-; MOVREL-NEXT:    v_cndmask_b32_e32 v3, v11, v7, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e64 v3, v11, s10, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
-; MOVREL-NEXT:    v_cndmask_b32_e32 v4, v12, v7, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e64 v4, v12, s10, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
-; MOVREL-NEXT:    v_cndmask_b32_e32 v5, v13, v7, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e64 v5, v13, s10, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
-; MOVREL-NEXT:    v_cndmask_b32_e32 v6, v14, v7, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e64 v6, v14, s10, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v0
 ; MOVREL-NEXT:    v_mov_b32_e32 v0, v8
-; MOVREL-NEXT:    v_cndmask_b32_e32 v7, v15, v7, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e64 v7, v15, s10, vcc_lo
 ; MOVREL-NEXT:    ; return to shader part epilog
 entry:
   %insert = insertelement <8 x float> %vec, float %val, i32 %idx
@@ -364,23 +363,22 @@
 ;
 ; MOVREL-LABEL: dyn_insertelement_v8f32_v_s_s:
 ; MOVREL:       ; %bb.0: ; %entry
-; MOVREL-NEXT:    v_mov_b32_e32 v8, s2
-; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s3, 0
-; MOVREL-NEXT:    v_cndmask_b32_e32 v0, v0, v8, vcc_lo
-; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s3, 1
-; MOVREL-NEXT:    v_cndmask_b32_e32 v1, v1, v8, vcc_lo
-; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s3, 2
-; MOVREL-NEXT:    v_cndmask_b32_e32 v2, v2, v8, vcc_lo
-; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s3, 3
-; MOVREL-NEXT:    v_cndmask_b32_e32 v3, v3, v8, vcc_lo
-; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s3, 4
-; MOVREL-NEXT:    v_cndmask_b32_e32 v4, v4, v8, vcc_lo
-; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s3, 5
-; MOVREL-NEXT:    v_cndmask_b32_e32 v5, v5, v8, vcc_lo
-; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s3, 6
-; MOVREL-NEXT:    v_cndmask_b32_e32 v6, v6, v8, vcc_lo
-; MOVREL-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s3, 7
-; MOVREL-NEXT:    v_cndmask_b32_e32 v7, v7, v8, vcc_lo
+; MOVREL-NEXT:    v_cmp_eq_u32_e64 s0, s3, 0
+; MOVREL-NEXT:    v_cndmask_b32_e64 v0, v0, s2, s0
+; MOVREL-NEXT:    v_cmp_eq_u32_e64 s0, s3, 1
+; MOVREL-NEXT:    v_cndmask_b32_e64 v1, v1, s2, s0
+; MOVREL-NEXT:    v_cmp_eq_u32_e64 s0, s3, 2
+; MOVREL-NEXT:    v_cndmask_b32_e64 v2, v2, s2, s0
+; MOVREL-NEXT:    v_cmp_eq_u32_e64 s0, s3, 3
+; MOVREL-NEXT:    v_cndmask_b32_e64 v3, v3, s2, s0
+; MOVREL-NEXT:    v_cmp_eq_u32_e64 s0, s3, 4
+; MOVREL-NEXT:    v_cndmask_b32_e64 v4, v4, s2, s0
+; MOVREL-NEXT:    v_cmp_eq_u32_e64 s0, s3, 5
+; MOVREL-NEXT:    v_cndmask_b32_e64 v5, v5, s2, s0
+; MOVREL-NEXT:    v_cmp_eq_u32_e64 s0, s3, 6
+; MOVREL-NEXT:    v_cndmask_b32_e64 v6, v6, s2, s0
+; MOVREL-NEXT:    v_cmp_eq_u32_e64 s0, s3, 7
+; MOVREL-NEXT:    v_cndmask_b32_e64 v7, v7, s2, s0
 ; MOVREL-NEXT:    ; return to shader part epilog
 entry:
   %insert = insertelement <8 x float> %vec, float %val, i32 %idx
@@ -492,23 +490,22 @@
 ;
 ; MOVREL-LABEL: dyn_insertelement_v8f32_v_s_v:
 ; MOVREL:       ; %bb.0: ; %entry
-; MOVREL-NEXT:    v_mov_b32_e32 v9, s2
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v8
-; MOVREL-NEXT:    v_cndmask_b32_e32 v0, v0, v9, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e64 v0, v0, s2, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v8
-; MOVREL-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e64 v1, v1, s2, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v8
-; MOVREL-NEXT:    v_cndmask_b32_e32 v2, v2, v9, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e64 v2, v2, s2, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v8
-; MOVREL-NEXT:    v_cndmask_b32_e32 v3, v3, v9, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e64 v3, v3, s2, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v8
-; MOVREL-NEXT:    v_cndmask_b32_e32 v4, v4, v9, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e64 v4, v4, s2, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v8
-; MOVREL-NEXT:    v_cndmask_b32_e32 v5, v5, v9, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e64 v5, v5, s2, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v8
-; MOVREL-NEXT:    v_cndmask_b32_e32 v6, v6, v9, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e64 v6, v6, s2, vcc_lo
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v8
-; MOVREL-NEXT:    v_cndmask_b32_e32 v7, v7, v9, vcc_lo
+; MOVREL-NEXT:    v_cndmask_b32_e64 v7, v7, s2, vcc_lo
 ; MOVREL-NEXT:    ; return to shader part epilog
 entry:
   %insert = insertelement <8 x float> %vec, float %val, i32 %idx
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-insert-vector-elt.mir
===================================================================
--- llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-insert-vector-elt.mir
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-insert-vector-elt.mir
@@ -46,22 +46,23 @@
     ; CHECK-LABEL: name: insert_vector_elt_v4i32_v_s_s
     ; CHECK: [[COPY:%[0-9]+]]:vgpr(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
-    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
     ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32)
+    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
     ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32), [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
     ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
     ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY3]](s32), [[C]]
-    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY1]], [[UV]]
+    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY4]], [[UV]]
     ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1
     ; CHECK: [[ICMP1:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY3]](s32), [[C1]]
-    ; CHECK: [[SELECT1:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[COPY1]], [[UV1]]
+    ; CHECK: [[SELECT1:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[COPY4]], [[UV1]]
     ; CHECK: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 2
     ; CHECK: [[ICMP2:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY3]](s32), [[C2]]
-    ; CHECK: [[SELECT2:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP2]](s1), [[COPY1]], [[UV2]]
+    ; CHECK: [[SELECT2:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP2]](s1), [[COPY4]], [[UV2]]
     ; CHECK: [[C3:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 3
     ; CHECK: [[ICMP3:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY3]](s32), [[C3]]
-    ; CHECK: [[SELECT3:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP3]](s1), [[COPY1]], [[UV3]]
+    ; CHECK: [[SELECT3:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP3]](s1), [[COPY4]], [[UV3]]
     ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:vgpr(<4 x s32>) = G_BUILD_VECTOR [[SELECT]](s32), [[SELECT1]](s32), [[SELECT2]](s32), [[SELECT3]](s32)
     ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
    %0:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
@@ -119,22 +120,23 @@
    ; CHECK-LABEL: name: insert_vector_elt_v4i32_s_s_v
    ; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0
    ; CHECK: [[COPY:%[0-9]+]]:sgpr(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $sgpr4
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr4
    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(<4 x s32>) = COPY [[COPY]](<4 x s32>)
+    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32), [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[COPY3]](<4 x s32>)
    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
    ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
-    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY1]], [[UV]]
+    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY4]], [[UV]]
    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1
    ; CHECK: [[ICMP1:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C1]]
-    ; CHECK: [[SELECT1:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[COPY1]], [[UV1]]
+    ; CHECK: [[SELECT1:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[COPY4]], [[UV1]]
    ; CHECK: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 2
    ; CHECK: [[ICMP2:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C2]]
-    ; CHECK: [[SELECT2:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP2]](s1), [[COPY1]], [[UV2]]
+    ; CHECK: [[SELECT2:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP2]](s1), [[COPY4]], [[UV2]]
    ; CHECK: [[C3:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 3
    ; CHECK: [[ICMP3:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C3]]
-    ; CHECK: [[SELECT3:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP3]](s1), [[COPY1]], [[UV3]]
+    ; CHECK: [[SELECT3:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP3]](s1), [[COPY4]], [[UV3]]
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:vgpr(<4 x s32>) = G_BUILD_VECTOR [[SELECT]](s32), [[SELECT1]](s32), [[SELECT2]](s32), [[SELECT3]](s32)
    ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
    %0:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
@@ -193,21 +195,22 @@
    ; CHECK-LABEL: name: insert_vector_elt_var_v4i32_v_s_v
    ; CHECK: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $sgpr4, $vgpr0
    ; CHECK: [[COPY:%[0-9]+]]:vgpr(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
-    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $sgpr4
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr4
    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32), [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
    ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
-    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY1]], [[UV]]
+    ; CHECK: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP]](s1), [[COPY3]], [[UV]]
    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1
    ; CHECK: [[ICMP1:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C1]]
-    ; CHECK: [[SELECT1:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[COPY1]], [[UV1]]
+    ; CHECK: [[SELECT1:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP1]](s1), [[COPY3]], [[UV1]]
    ; CHECK: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 2
    ; CHECK: [[ICMP2:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C2]]
-    ; CHECK: [[SELECT2:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP2]](s1), [[COPY1]], [[UV2]]
+    ; CHECK: [[SELECT2:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP2]](s1), [[COPY3]], [[UV2]]
    ; CHECK: [[C3:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 3
    ; CHECK: [[ICMP3:%[0-9]+]]:vcc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C3]]
-    ; CHECK: [[SELECT3:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP3]](s1), [[COPY1]], [[UV3]]
+    ; CHECK: [[SELECT3:%[0-9]+]]:vgpr(s32) = G_SELECT [[ICMP3]](s1), [[COPY3]], [[UV3]]
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:vgpr(<4 x s32>) = G_BUILD_VECTOR [[SELECT]](s32), [[SELECT1]](s32), [[SELECT2]](s32), [[SELECT3]](s32)
    ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
    %0:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3