Index: llvm/lib/Target/AMDGPU/AMDGPUCombine.td
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPUCombine.td
+++ llvm/lib/Target/AMDGPU/AMDGPUCombine.td
@@ -40,9 +40,15 @@
 def extract_elt_to_cmp_select : GICombineRule<
   (defs root:$extract_elt),
   (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$extract_elt,
-         [{ return matchExtractElt(*${extract_elt}, MRI, *MF); }]),
+         [{ return matchExtractInsertElt(*${extract_elt}, MRI, *MF); }]),
   (apply [{ applyExtractEltToCmpSelect(*${extract_elt}, MRI); }])>;
 
+def insert_elt_to_cmp_select : GICombineRule<
+  (defs root:$insert_elt),
+  (match (wip_match_opcode G_INSERT_VECTOR_ELT):$insert_elt,
+         [{ return matchExtractInsertElt(*${insert_elt}, MRI, *MF); }]),
+  (apply [{ applyInsertEltToCmpSelect(*${insert_elt}, MRI); }])>;
+
 // Combines which should only apply on SI/VI
 def gfx6gfx7_combines : GICombineGroup<[fcmp_select_to_fmin_fmax_legacy]>;
 
@@ -59,7 +65,7 @@
 // FIXME: Is there a way to remove a single item from all_combines?
 def all_combines_minus_extload : GICombineGroup<[trivial_combines,
   ptr_add_immed_chain, combine_indexed_load_store, extract_elt_to_cmp_select,
-  undef_combines, identity_combines]
+  insert_elt_to_cmp_select, undef_combines, identity_combines]
 >;
 
 def AMDGPUPostLegalizerCombinerHelper: GICombinerHelper<
Index: llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp
@@ -219,10 +219,10 @@
   MI.eraseFromParent();
 }
 
-static bool matchExtractElt(MachineInstr &MI, MachineRegisterInfo &MRI,
-                            MachineFunction &MF) {
+static bool matchExtractInsertElt(MachineInstr &MI, MachineRegisterInfo &MRI,
+                                  MachineFunction &MF) {
   Register VecReg = MI.getOperand(1).getReg();
-  Register Idx = MI.getOperand(2).getReg();
+  Register Idx = MI.getOperand(MI.getNumExplicitOperands() - 1).getReg();
 
   if (getOpcodeDef(TargetOpcode::G_CONSTANT, Idx, MRI))
     return false;
@@ -274,6 +274,40 @@
   MI.eraseFromParent();
 }
 
+static void applyInsertEltToCmpSelect(MachineInstr &MI,
+                                      MachineRegisterInfo &MRI) {
+  MachineIRBuilder B(MI);
+
+  Register VecReg = MI.getOperand(1).getReg();
+  Register Ins = MI.getOperand(2).getReg();
+  Register Idx = MI.getOperand(3).getReg();
+
+  LLT VecTy = MRI.getType(VecReg);
+  LLT EltTy = VecTy.getScalarType();
+  unsigned EltSize = EltTy.getSizeInBits();
+  unsigned NumElem = VecTy.getNumElements();
+  MachineOperand &Vec = MI.getOperand(1);
+  SmallVector<Register, 8> Ops;
+
+  for (unsigned I = 0; I < NumElem; ++I) {
+    // FIXME: After RegBankSelect we should know and set register bank. 
+ Register Elt = MRI.createGenericVirtualRegister(EltTy); + B.buildExtract(Elt, Vec, I * EltSize); + + Register Cmp = MRI.createGenericVirtualRegister(LLT::scalar(1)); + Register IC = MRI.createGenericVirtualRegister(LLT::scalar(32)); + B.buildConstant(IC, I); + B.buildICmp(CmpInst::ICMP_EQ, Cmp, Idx, IC); + Register Sel = MRI.createGenericVirtualRegister(EltTy); + B.buildSelect(Sel, Cmp, Ins, Elt); + + Ops.push_back(Sel); + } + + B.buildBuildVector(MI.getOperand(0).getReg(), Ops); + MI.eraseFromParent(); +} + #define AMDGPUPOSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_DEPS #include "AMDGPUGenPostLegalizeGICombiner.inc" #undef AMDGPUPOSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_DEPS Index: llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll =================================================================== --- llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll +++ llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll @@ -9,32 +9,91 @@ define amdgpu_ps <8 x i32> @dyn_insertelement_v8i32_s_s_s(<8 x i32> inreg %vec, i32 inreg %val, i32 inreg %idx) { ; GPRIDX-LABEL: dyn_insertelement_v8i32_s_s_s: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b32 s0, s2 -; GPRIDX-NEXT: s_mov_b32 s1, s3 -; GPRIDX-NEXT: s_mov_b32 s2, s4 -; GPRIDX-NEXT: s_mov_b32 s3, s5 -; GPRIDX-NEXT: s_mov_b32 s4, s6 -; GPRIDX-NEXT: s_mov_b32 s5, s7 -; GPRIDX-NEXT: s_mov_b32 s6, s8 -; GPRIDX-NEXT: s_mov_b32 s7, s9 -; GPRIDX-NEXT: s_mov_b32 m0, s11 -; GPRIDX-NEXT: s_nop 0 -; GPRIDX-NEXT: s_movreld_b32 s0, s10 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, s0, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s0, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, s10, s2 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 1 +; GPRIDX-NEXT: s_cselect_b32 s1, 1, 0 +; GPRIDX-NEXT: s_and_b32 s1, s1, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s1, 0 +; GPRIDX-NEXT: s_cselect_b32 s1, s10, s3 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 2 +; GPRIDX-NEXT: s_cselect_b32 s2, 1, 0 +; GPRIDX-NEXT: s_and_b32 s2, s2, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s2, 0 +; GPRIDX-NEXT: s_cselect_b32 s2, s10, s4 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 3 +; GPRIDX-NEXT: s_cselect_b32 s3, 1, 0 +; GPRIDX-NEXT: s_and_b32 s3, s3, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s3, 0 +; GPRIDX-NEXT: s_cselect_b32 s3, s10, s5 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 4 +; GPRIDX-NEXT: s_cselect_b32 s4, 1, 0 +; GPRIDX-NEXT: s_and_b32 s4, s4, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s4, 0 +; GPRIDX-NEXT: s_cselect_b32 s4, s10, s6 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 5 +; GPRIDX-NEXT: s_cselect_b32 s5, 1, 0 +; GPRIDX-NEXT: s_and_b32 s5, s5, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s5, 0 +; GPRIDX-NEXT: s_cselect_b32 s5, s10, s7 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 6 +; GPRIDX-NEXT: s_cselect_b32 s6, 1, 0 +; GPRIDX-NEXT: s_and_b32 s6, s6, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s6, 0 +; GPRIDX-NEXT: s_cselect_b32 s6, s10, s8 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 7 +; GPRIDX-NEXT: s_cselect_b32 s7, 1, 0 +; GPRIDX-NEXT: s_and_b32 s7, s7, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s7, 0 +; GPRIDX-NEXT: s_cselect_b32 s7, s10, s9 ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v8i32_s_s_s: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s0, s2 -; MOVREL-NEXT: s_mov_b32 m0, s11 -; MOVREL-NEXT: s_mov_b32 s1, s3 -; MOVREL-NEXT: s_mov_b32 s2, s4 -; MOVREL-NEXT: s_mov_b32 s3, s5 -; MOVREL-NEXT: s_mov_b32 s4, s6 -; MOVREL-NEXT: s_mov_b32 s5, s7 -; MOVREL-NEXT: s_mov_b32 s6, s8 -; MOVREL-NEXT: s_mov_b32 s7, s9 -; MOVREL-NEXT: s_movreld_b32 s0, s10 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 0 ; MOVREL-NEXT: ; implicit-def: $vcc_hi +; MOVREL-NEXT: 
s_cselect_b32 s0, 1, 0 +; MOVREL-NEXT: s_and_b32 s0, s0, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s0, 0 +; MOVREL-NEXT: s_cselect_b32 s0, s10, s2 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 1 +; MOVREL-NEXT: s_cselect_b32 s1, 1, 0 +; MOVREL-NEXT: s_and_b32 s1, s1, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s1, 0 +; MOVREL-NEXT: s_cselect_b32 s1, s10, s3 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 2 +; MOVREL-NEXT: s_cselect_b32 s2, 1, 0 +; MOVREL-NEXT: s_and_b32 s2, s2, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s2, 0 +; MOVREL-NEXT: s_cselect_b32 s2, s10, s4 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 3 +; MOVREL-NEXT: s_cselect_b32 s3, 1, 0 +; MOVREL-NEXT: s_and_b32 s3, s3, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s3, 0 +; MOVREL-NEXT: s_cselect_b32 s3, s10, s5 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 4 +; MOVREL-NEXT: s_cselect_b32 s4, 1, 0 +; MOVREL-NEXT: s_and_b32 s4, s4, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s4, 0 +; MOVREL-NEXT: s_cselect_b32 s4, s10, s6 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 5 +; MOVREL-NEXT: s_cselect_b32 s5, 1, 0 +; MOVREL-NEXT: s_and_b32 s5, s5, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s5, 0 +; MOVREL-NEXT: s_cselect_b32 s5, s10, s7 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 6 +; MOVREL-NEXT: s_cselect_b32 s6, 1, 0 +; MOVREL-NEXT: s_and_b32 s6, s6, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s6, 0 +; MOVREL-NEXT: s_cselect_b32 s6, s10, s8 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 7 +; MOVREL-NEXT: s_cselect_b32 s7, 1, 0 +; MOVREL-NEXT: s_and_b32 s7, s7, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s7, 0 +; MOVREL-NEXT: s_cselect_b32 s7, s10, s9 ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <8 x i32> %vec, i32 %val, i32 %idx @@ -44,32 +103,91 @@ define amdgpu_ps <8 x i8 addrspace(3)*> @dyn_insertelement_v8p3i8_s_s_s(<8 x i8 addrspace(3)*> inreg %vec, i8 addrspace(3)* inreg %val, i32 inreg %idx) { ; GPRIDX-LABEL: dyn_insertelement_v8p3i8_s_s_s: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b32 s0, s2 -; GPRIDX-NEXT: s_mov_b32 s1, s3 -; GPRIDX-NEXT: s_mov_b32 s2, s4 -; GPRIDX-NEXT: s_mov_b32 s3, s5 -; GPRIDX-NEXT: s_mov_b32 s4, s6 -; GPRIDX-NEXT: s_mov_b32 s5, s7 -; GPRIDX-NEXT: s_mov_b32 s6, s8 -; GPRIDX-NEXT: s_mov_b32 s7, s9 -; GPRIDX-NEXT: s_mov_b32 m0, s11 -; GPRIDX-NEXT: s_nop 0 -; GPRIDX-NEXT: s_movreld_b32 s0, s10 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, s0, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s0, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, s10, s2 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 1 +; GPRIDX-NEXT: s_cselect_b32 s1, 1, 0 +; GPRIDX-NEXT: s_and_b32 s1, s1, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s1, 0 +; GPRIDX-NEXT: s_cselect_b32 s1, s10, s3 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 2 +; GPRIDX-NEXT: s_cselect_b32 s2, 1, 0 +; GPRIDX-NEXT: s_and_b32 s2, s2, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s2, 0 +; GPRIDX-NEXT: s_cselect_b32 s2, s10, s4 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 3 +; GPRIDX-NEXT: s_cselect_b32 s3, 1, 0 +; GPRIDX-NEXT: s_and_b32 s3, s3, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s3, 0 +; GPRIDX-NEXT: s_cselect_b32 s3, s10, s5 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 4 +; GPRIDX-NEXT: s_cselect_b32 s4, 1, 0 +; GPRIDX-NEXT: s_and_b32 s4, s4, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s4, 0 +; GPRIDX-NEXT: s_cselect_b32 s4, s10, s6 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 5 +; GPRIDX-NEXT: s_cselect_b32 s5, 1, 0 +; GPRIDX-NEXT: s_and_b32 s5, s5, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s5, 0 +; GPRIDX-NEXT: s_cselect_b32 s5, s10, s7 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 6 +; GPRIDX-NEXT: s_cselect_b32 s6, 1, 0 +; GPRIDX-NEXT: s_and_b32 s6, s6, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s6, 0 +; GPRIDX-NEXT: s_cselect_b32 s6, s10, s8 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 7 +; GPRIDX-NEXT: 
s_cselect_b32 s7, 1, 0 +; GPRIDX-NEXT: s_and_b32 s7, s7, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s7, 0 +; GPRIDX-NEXT: s_cselect_b32 s7, s10, s9 ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v8p3i8_s_s_s: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s0, s2 -; MOVREL-NEXT: s_mov_b32 m0, s11 -; MOVREL-NEXT: s_mov_b32 s1, s3 -; MOVREL-NEXT: s_mov_b32 s2, s4 -; MOVREL-NEXT: s_mov_b32 s3, s5 -; MOVREL-NEXT: s_mov_b32 s4, s6 -; MOVREL-NEXT: s_mov_b32 s5, s7 -; MOVREL-NEXT: s_mov_b32 s6, s8 -; MOVREL-NEXT: s_mov_b32 s7, s9 -; MOVREL-NEXT: s_movreld_b32 s0, s10 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 0 ; MOVREL-NEXT: ; implicit-def: $vcc_hi +; MOVREL-NEXT: s_cselect_b32 s0, 1, 0 +; MOVREL-NEXT: s_and_b32 s0, s0, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s0, 0 +; MOVREL-NEXT: s_cselect_b32 s0, s10, s2 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 1 +; MOVREL-NEXT: s_cselect_b32 s1, 1, 0 +; MOVREL-NEXT: s_and_b32 s1, s1, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s1, 0 +; MOVREL-NEXT: s_cselect_b32 s1, s10, s3 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 2 +; MOVREL-NEXT: s_cselect_b32 s2, 1, 0 +; MOVREL-NEXT: s_and_b32 s2, s2, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s2, 0 +; MOVREL-NEXT: s_cselect_b32 s2, s10, s4 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 3 +; MOVREL-NEXT: s_cselect_b32 s3, 1, 0 +; MOVREL-NEXT: s_and_b32 s3, s3, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s3, 0 +; MOVREL-NEXT: s_cselect_b32 s3, s10, s5 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 4 +; MOVREL-NEXT: s_cselect_b32 s4, 1, 0 +; MOVREL-NEXT: s_and_b32 s4, s4, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s4, 0 +; MOVREL-NEXT: s_cselect_b32 s4, s10, s6 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 5 +; MOVREL-NEXT: s_cselect_b32 s5, 1, 0 +; MOVREL-NEXT: s_and_b32 s5, s5, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s5, 0 +; MOVREL-NEXT: s_cselect_b32 s5, s10, s7 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 6 +; MOVREL-NEXT: s_cselect_b32 s6, 1, 0 +; MOVREL-NEXT: s_and_b32 s6, s6, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s6, 0 +; MOVREL-NEXT: s_cselect_b32 s6, s10, s8 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 7 +; MOVREL-NEXT: s_cselect_b32 s7, 1, 0 +; MOVREL-NEXT: s_and_b32 s7, s7, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s7, 0 +; MOVREL-NEXT: s_cselect_b32 s7, s10, s9 ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <8 x i8 addrspace(3)*> %vec, i8 addrspace(3)* %val, i32 %idx @@ -80,100 +198,54 @@ ; GPRIDX-LABEL: dyn_insertelement_v8f32_const_s_v_v: ; GPRIDX: ; %bb.0: ; %entry ; GPRIDX-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GPRIDX-NEXT: s_mov_b32 s11, 0x41000000 -; GPRIDX-NEXT: s_mov_b32 s10, 0x40e00000 -; GPRIDX-NEXT: s_mov_b32 s9, 0x40c00000 -; GPRIDX-NEXT: s_mov_b32 s8, 0x40a00000 -; GPRIDX-NEXT: s_mov_b32 s7, 4.0 -; GPRIDX-NEXT: s_mov_b32 s6, 0x40400000 -; GPRIDX-NEXT: s_mov_b32 s5, 2.0 -; GPRIDX-NEXT: s_mov_b32 s4, 1.0 -; GPRIDX-NEXT: v_mov_b32_e32 v17, s11 -; GPRIDX-NEXT: v_mov_b32_e32 v16, s10 -; GPRIDX-NEXT: v_mov_b32_e32 v15, s9 -; GPRIDX-NEXT: v_mov_b32_e32 v14, s8 -; GPRIDX-NEXT: v_mov_b32_e32 v13, s7 -; GPRIDX-NEXT: v_mov_b32_e32 v12, s6 -; GPRIDX-NEXT: v_mov_b32_e32 v11, s5 -; GPRIDX-NEXT: v_mov_b32_e32 v10, s4 -; GPRIDX-NEXT: s_mov_b64 s[4:5], exec -; GPRIDX-NEXT: BB2_1: ; =>This Inner Loop Header: Depth=1 -; GPRIDX-NEXT: v_readfirstlane_b32 s6, v1 -; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, s6, v1 -; GPRIDX-NEXT: s_set_gpr_idx_on s6, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v2, v10 -; GPRIDX-NEXT: v_mov_b32_e32 v3, v11 -; GPRIDX-NEXT: v_mov_b32_e32 v4, v12 -; GPRIDX-NEXT: v_mov_b32_e32 v5, v13 -; GPRIDX-NEXT: v_mov_b32_e32 v6, v14 -; GPRIDX-NEXT: v_mov_b32_e32 v7, v15 -; GPRIDX-NEXT: v_mov_b32_e32 v8, v16 -; 
GPRIDX-NEXT: v_mov_b32_e32 v9, v17 -; GPRIDX-NEXT: v_mov_b32_e32 v2, v0 -; GPRIDX-NEXT: s_set_gpr_idx_off -; GPRIDX-NEXT: s_and_saveexec_b64 vcc, vcc -; GPRIDX-NEXT: s_xor_b64 exec, exec, vcc -; GPRIDX-NEXT: s_cbranch_execnz BB2_1 -; GPRIDX-NEXT: ; %bb.2: -; GPRIDX-NEXT: s_mov_b64 exec, s[4:5] -; GPRIDX-NEXT: v_mov_b32_e32 v0, v2 -; GPRIDX-NEXT: v_mov_b32_e32 v1, v3 -; GPRIDX-NEXT: v_mov_b32_e32 v2, v4 -; GPRIDX-NEXT: v_mov_b32_e32 v3, v5 -; GPRIDX-NEXT: v_mov_b32_e32 v4, v6 -; GPRIDX-NEXT: v_mov_b32_e32 v5, v7 -; GPRIDX-NEXT: v_mov_b32_e32 v6, v8 -; GPRIDX-NEXT: v_mov_b32_e32 v7, v9 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v8, 1.0, v0, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v9, 2.0, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v2, 0x40400000 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v0, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, 4.0, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v4, 0x40a00000 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v5, 0x40c00000 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v6, 0x40e00000 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v0, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 7, v1 +; GPRIDX-NEXT: v_mov_b32_e32 v7, 0x41000000 +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v0, v8 +; GPRIDX-NEXT: v_mov_b32_e32 v1, v9 ; GPRIDX-NEXT: s_setpc_b64 s[30:31] ; ; MOVREL-LABEL: dyn_insertelement_v8f32_const_s_v_v: ; MOVREL: ; %bb.0: ; %entry ; MOVREL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; MOVREL-NEXT: s_waitcnt_vscnt null, 0x0 -; MOVREL-NEXT: s_mov_b32 s11, 0x41000000 -; MOVREL-NEXT: s_mov_b32 s10, 0x40e00000 -; MOVREL-NEXT: s_mov_b32 s9, 0x40c00000 -; MOVREL-NEXT: s_mov_b32 s8, 0x40a00000 -; MOVREL-NEXT: s_mov_b32 s7, 4.0 -; MOVREL-NEXT: s_mov_b32 s6, 0x40400000 -; MOVREL-NEXT: s_mov_b32 s5, 2.0 -; MOVREL-NEXT: s_mov_b32 s4, 1.0 -; MOVREL-NEXT: v_mov_b32_e32 v17, s11 -; MOVREL-NEXT: v_mov_b32_e32 v16, s10 -; MOVREL-NEXT: v_mov_b32_e32 v15, s9 -; MOVREL-NEXT: v_mov_b32_e32 v14, s8 -; MOVREL-NEXT: v_mov_b32_e32 v13, s7 -; MOVREL-NEXT: v_mov_b32_e32 v12, s6 -; MOVREL-NEXT: v_mov_b32_e32 v11, s5 -; MOVREL-NEXT: v_mov_b32_e32 v10, s4 -; MOVREL-NEXT: s_mov_b32 s4, exec_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 ; MOVREL-NEXT: ; implicit-def: $vcc_hi -; MOVREL-NEXT: BB2_1: ; =>This Inner Loop Header: Depth=1 -; MOVREL-NEXT: v_readfirstlane_b32 s5, v1 -; MOVREL-NEXT: v_mov_b32_e32 v2, v10 -; MOVREL-NEXT: v_mov_b32_e32 v3, v11 -; MOVREL-NEXT: v_mov_b32_e32 v4, v12 -; MOVREL-NEXT: v_mov_b32_e32 v5, v13 -; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, s5, v1 -; MOVREL-NEXT: s_mov_b32 m0, s5 -; MOVREL-NEXT: v_mov_b32_e32 v6, v14 -; MOVREL-NEXT: v_mov_b32_e32 v7, v15 -; MOVREL-NEXT: v_mov_b32_e32 v8, v16 -; MOVREL-NEXT: v_mov_b32_e32 v9, v17 -; MOVREL-NEXT: v_movreld_b32_e32 v2, v0 -; MOVREL-NEXT: s_and_saveexec_b32 vcc_lo, vcc_lo -; MOVREL-NEXT: s_xor_b32 exec_lo, exec_lo, vcc_lo -; MOVREL-NEXT: s_cbranch_execnz BB2_1 -; MOVREL-NEXT: ; %bb.2: -; MOVREL-NEXT: s_mov_b32 exec_lo, s4 -; MOVREL-NEXT: v_mov_b32_e32 v0, v2 -; MOVREL-NEXT: v_mov_b32_e32 v1, v3 -; MOVREL-NEXT: v_mov_b32_e32 v2, v4 -; MOVREL-NEXT: v_mov_b32_e32 v3, v5 -; MOVREL-NEXT: v_mov_b32_e32 v4, v6 -; MOVREL-NEXT: v_mov_b32_e32 v5, v7 -; 
MOVREL-NEXT: v_mov_b32_e32 v6, v8 -; MOVREL-NEXT: v_mov_b32_e32 v7, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v8, 1.0, v0, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1 +; MOVREL-NEXT: v_cndmask_b32_e32 v9, 2.0, v0, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v1 +; MOVREL-NEXT: v_cndmask_b32_e32 v2, 0x40400000, v0, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v1 +; MOVREL-NEXT: v_cndmask_b32_e32 v3, 4.0, v0, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v1 +; MOVREL-NEXT: v_cndmask_b32_e32 v4, 0x40a00000, v0, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v1 +; MOVREL-NEXT: v_cndmask_b32_e32 v5, 0x40c00000, v0, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v1 +; MOVREL-NEXT: v_cndmask_b32_e32 v6, 0x40e00000, v0, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v1 +; MOVREL-NEXT: v_mov_b32_e32 v1, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v7, 0x41000000, v0, vcc_lo +; MOVREL-NEXT: v_mov_b32_e32 v0, v8 ; MOVREL-NEXT: s_setpc_b64 s[30:31] entry: %insert = insertelement <8 x float> , float %val, i32 %idx @@ -183,91 +255,62 @@ define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_s_s_v(<8 x float> inreg %vec, float inreg %val, i32 %idx) { ; GPRIDX-LABEL: dyn_insertelement_v8f32_s_s_v: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b32 s1, s3 -; GPRIDX-NEXT: s_mov_b32 s3, s5 -; GPRIDX-NEXT: s_mov_b32 s5, s7 -; GPRIDX-NEXT: s_mov_b32 s7, s9 -; GPRIDX-NEXT: s_mov_b32 s0, s2 -; GPRIDX-NEXT: s_mov_b32 s2, s4 -; GPRIDX-NEXT: s_mov_b32 s4, s6 -; GPRIDX-NEXT: s_mov_b32 s6, s8 -; GPRIDX-NEXT: v_mov_b32_e32 v16, s7 -; GPRIDX-NEXT: v_mov_b32_e32 v8, v0 -; GPRIDX-NEXT: v_mov_b32_e32 v15, s6 -; GPRIDX-NEXT: v_mov_b32_e32 v14, s5 -; GPRIDX-NEXT: v_mov_b32_e32 v13, s4 -; GPRIDX-NEXT: v_mov_b32_e32 v12, s3 -; GPRIDX-NEXT: v_mov_b32_e32 v11, s2 -; GPRIDX-NEXT: v_mov_b32_e32 v10, s1 -; GPRIDX-NEXT: v_mov_b32_e32 v9, s0 -; GPRIDX-NEXT: s_mov_b64 s[0:1], exec -; GPRIDX-NEXT: BB3_1: ; =>This Inner Loop Header: Depth=1 -; GPRIDX-NEXT: v_readfirstlane_b32 s2, v8 -; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, s2, v8 -; GPRIDX-NEXT: s_set_gpr_idx_on s2, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v0, v9 -; GPRIDX-NEXT: v_mov_b32_e32 v1, v10 -; GPRIDX-NEXT: v_mov_b32_e32 v2, v11 -; GPRIDX-NEXT: v_mov_b32_e32 v3, v12 -; GPRIDX-NEXT: v_mov_b32_e32 v4, v13 -; GPRIDX-NEXT: v_mov_b32_e32 v5, v14 -; GPRIDX-NEXT: v_mov_b32_e32 v6, v15 -; GPRIDX-NEXT: v_mov_b32_e32 v7, v16 -; GPRIDX-NEXT: v_mov_b32_e32 v0, s10 -; GPRIDX-NEXT: s_set_gpr_idx_off -; GPRIDX-NEXT: s_and_saveexec_b64 vcc, vcc -; GPRIDX-NEXT: s_xor_b64 exec, exec, vcc -; GPRIDX-NEXT: s_cbranch_execnz BB3_1 -; GPRIDX-NEXT: ; %bb.2: -; GPRIDX-NEXT: s_mov_b64 exec, s[0:1] +; GPRIDX-NEXT: v_mov_b32_e32 v7, s10 +; GPRIDX-NEXT: v_mov_b32_e32 v1, s2 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v8, v1, v7, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v1, s3 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v2, s4 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v3, s5 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v4, s6 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v5, s7 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v6, s8 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 
vcc, 6, v0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 7, v0 +; GPRIDX-NEXT: v_mov_b32_e32 v9, s9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v9, v7, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v0, v8 ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v8f32_s_s_v: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s1, s3 -; MOVREL-NEXT: s_mov_b32 s3, s5 -; MOVREL-NEXT: s_mov_b32 s5, s7 -; MOVREL-NEXT: s_mov_b32 s7, s9 -; MOVREL-NEXT: s_mov_b32 s0, s2 -; MOVREL-NEXT: s_mov_b32 s2, s4 -; MOVREL-NEXT: s_mov_b32 s4, s6 -; MOVREL-NEXT: s_mov_b32 s6, s8 -; MOVREL-NEXT: v_mov_b32_e32 v16, s7 -; MOVREL-NEXT: v_mov_b32_e32 v14, s5 -; MOVREL-NEXT: v_mov_b32_e32 v12, s3 -; MOVREL-NEXT: v_mov_b32_e32 v13, s4 -; MOVREL-NEXT: v_mov_b32_e32 v15, s6 -; MOVREL-NEXT: v_mov_b32_e32 v11, s2 -; MOVREL-NEXT: v_mov_b32_e32 v10, s1 -; MOVREL-NEXT: v_mov_b32_e32 v9, s0 -; MOVREL-NEXT: s_mov_b32 s0, exec_lo +; MOVREL-NEXT: v_mov_b32_e32 v1, s2 +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; MOVREL-NEXT: v_mov_b32_e32 v2, s3 +; MOVREL-NEXT: v_mov_b32_e32 v3, s4 +; MOVREL-NEXT: v_mov_b32_e32 v4, s5 +; MOVREL-NEXT: v_mov_b32_e32 v5, s6 +; MOVREL-NEXT: v_cndmask_b32_e64 v8, v1, s10, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 +; MOVREL-NEXT: v_mov_b32_e32 v6, s7 +; MOVREL-NEXT: v_mov_b32_e32 v7, s8 +; MOVREL-NEXT: v_mov_b32_e32 v9, s9 ; MOVREL-NEXT: ; implicit-def: $vcc_hi -; MOVREL-NEXT: BB3_1: ; =>This Inner Loop Header: Depth=1 -; MOVREL-NEXT: v_readfirstlane_b32 s1, v0 -; MOVREL-NEXT: v_mov_b32_e32 v1, v9 -; MOVREL-NEXT: v_mov_b32_e32 v2, v10 -; MOVREL-NEXT: v_mov_b32_e32 v3, v11 -; MOVREL-NEXT: v_mov_b32_e32 v4, v12 -; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, s1, v0 -; MOVREL-NEXT: s_mov_b32 m0, s1 -; MOVREL-NEXT: v_mov_b32_e32 v5, v13 -; MOVREL-NEXT: v_mov_b32_e32 v6, v14 -; MOVREL-NEXT: v_mov_b32_e32 v7, v15 -; MOVREL-NEXT: v_mov_b32_e32 v8, v16 -; MOVREL-NEXT: v_movreld_b32_e32 v1, s10 -; MOVREL-NEXT: s_and_saveexec_b32 vcc_lo, vcc_lo -; MOVREL-NEXT: s_xor_b32 exec_lo, exec_lo, vcc_lo -; MOVREL-NEXT: s_cbranch_execnz BB3_1 -; MOVREL-NEXT: ; %bb.2: -; MOVREL-NEXT: s_mov_b32 exec_lo, s0 -; MOVREL-NEXT: v_mov_b32_e32 v0, v1 -; MOVREL-NEXT: v_mov_b32_e32 v1, v2 -; MOVREL-NEXT: v_mov_b32_e32 v2, v3 -; MOVREL-NEXT: v_mov_b32_e32 v3, v4 -; MOVREL-NEXT: v_mov_b32_e32 v4, v5 -; MOVREL-NEXT: v_mov_b32_e32 v5, v6 -; MOVREL-NEXT: v_mov_b32_e32 v6, v7 -; MOVREL-NEXT: v_mov_b32_e32 v7, v8 +; MOVREL-NEXT: v_cndmask_b32_e64 v1, v2, s10, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v0 +; MOVREL-NEXT: v_cndmask_b32_e64 v2, v3, s10, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 +; MOVREL-NEXT: v_cndmask_b32_e64 v3, v4, s10, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 +; MOVREL-NEXT: v_cndmask_b32_e64 v4, v5, s10, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0 +; MOVREL-NEXT: v_cndmask_b32_e64 v5, v6, s10, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v0 +; MOVREL-NEXT: v_cndmask_b32_e64 v6, v7, s10, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v0 +; MOVREL-NEXT: v_mov_b32_e32 v0, v8 +; MOVREL-NEXT: v_cndmask_b32_e64 v7, v9, s10, vcc_lo ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <8 x float> %vec, float %val, i32 %idx @@ -277,50 +320,101 @@ define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_s_v_s(<8 x float> inreg %vec, float %val, i32 inreg %idx) { ; GPRIDX-LABEL: dyn_insertelement_v8f32_s_v_s: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b32 s0, s2 -; GPRIDX-NEXT: s_mov_b32 s1, 
s3 -; GPRIDX-NEXT: s_mov_b32 s2, s4 -; GPRIDX-NEXT: s_mov_b32 s3, s5 -; GPRIDX-NEXT: s_mov_b32 s4, s6 -; GPRIDX-NEXT: s_mov_b32 s5, s7 -; GPRIDX-NEXT: s_mov_b32 s6, s8 -; GPRIDX-NEXT: s_mov_b32 s7, s9 -; GPRIDX-NEXT: v_mov_b32_e32 v8, v0 -; GPRIDX-NEXT: v_mov_b32_e32 v0, s0 -; GPRIDX-NEXT: v_mov_b32_e32 v1, s1 -; GPRIDX-NEXT: v_mov_b32_e32 v2, s2 -; GPRIDX-NEXT: v_mov_b32_e32 v3, s3 -; GPRIDX-NEXT: v_mov_b32_e32 v4, s4 -; GPRIDX-NEXT: v_mov_b32_e32 v5, s5 -; GPRIDX-NEXT: v_mov_b32_e32 v6, s6 -; GPRIDX-NEXT: v_mov_b32_e32 v7, s7 -; GPRIDX-NEXT: s_set_gpr_idx_on s10, gpr_idx(DST) +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 1 +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: v_mov_b32_e32 v1, s2 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 2 +; GPRIDX-NEXT: v_cndmask_b32_e32 v8, v1, v0, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: v_mov_b32_e32 v1, s3 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 3 +; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v0, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: v_mov_b32_e32 v2, s4 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 4 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v0, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: v_mov_b32_e32 v3, s5 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 5 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v0, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: v_mov_b32_e32 v4, s6 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 6 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v0, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: v_mov_b32_e32 v5, s7 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 7 +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v0, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: v_mov_b32_e32 v6, s8 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v7, s9 +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v0, vcc ; GPRIDX-NEXT: v_mov_b32_e32 v0, v8 -; GPRIDX-NEXT: s_set_gpr_idx_off ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v8f32_s_v_s: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s0, s2 -; MOVREL-NEXT: s_mov_b32 s1, s3 -; MOVREL-NEXT: s_mov_b32 s2, s4 -; MOVREL-NEXT: s_mov_b32 s3, s5 -; MOVREL-NEXT: s_mov_b32 s4, s6 -; MOVREL-NEXT: s_mov_b32 s5, s7 -; MOVREL-NEXT: s_mov_b32 s6, s8 -; MOVREL-NEXT: s_mov_b32 s7, s9 -; MOVREL-NEXT: v_mov_b32_e32 v8, v0 -; MOVREL-NEXT: v_mov_b32_e32 v0, s0 -; MOVREL-NEXT: s_mov_b32 m0, s10 -; MOVREL-NEXT: v_mov_b32_e32 v1, s1 -; MOVREL-NEXT: v_mov_b32_e32 v2, s2 -; MOVREL-NEXT: v_mov_b32_e32 v3, s3 -; MOVREL-NEXT: v_mov_b32_e32 v4, s4 -; MOVREL-NEXT: v_mov_b32_e32 v5, s5 -; MOVREL-NEXT: v_mov_b32_e32 v6, s6 -; MOVREL-NEXT: v_mov_b32_e32 v7, s7 -; MOVREL-NEXT: v_movreld_b32_e32 v0, v8 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 0 ; MOVREL-NEXT: ; implicit-def: $vcc_hi +; MOVREL-NEXT: s_cselect_b32 s0, 1, 0 +; MOVREL-NEXT: s_and_b32 s0, 1, s0 +; MOVREL-NEXT: 
s_cmp_eq_u32 s10, 1 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 +; MOVREL-NEXT: s_cselect_b32 s1, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v8, s2, v0, vcc_lo +; MOVREL-NEXT: s_and_b32 s1, 1, s1 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 2 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1 +; MOVREL-NEXT: s_cselect_b32 s11, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v1, s3, v0, vcc_lo +; MOVREL-NEXT: s_and_b32 s11, 1, s11 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 3 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s11 +; MOVREL-NEXT: s_cselect_b32 s12, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v2, s4, v0, vcc_lo +; MOVREL-NEXT: s_and_b32 s12, 1, s12 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 4 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s12 +; MOVREL-NEXT: s_cselect_b32 s13, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v3, s5, v0, vcc_lo +; MOVREL-NEXT: s_and_b32 s13, 1, s13 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 5 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s13 +; MOVREL-NEXT: s_cselect_b32 s14, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v4, s6, v0, vcc_lo +; MOVREL-NEXT: s_and_b32 s0, 1, s14 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 6 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 +; MOVREL-NEXT: s_cselect_b32 s2, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v5, s7, v0, vcc_lo +; MOVREL-NEXT: s_and_b32 s1, 1, s2 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 7 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1 +; MOVREL-NEXT: s_cselect_b32 s2, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v6, s8, v0, vcc_lo +; MOVREL-NEXT: s_and_b32 s2, 1, s2 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s2 +; MOVREL-NEXT: v_cndmask_b32_e32 v7, s9, v0, vcc_lo +; MOVREL-NEXT: v_mov_b32_e32 v0, v8 ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <8 x float> %vec, float %val, i32 %idx @@ -330,16 +424,92 @@ define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_v_s_s(<8 x float> %vec, float inreg %val, i32 inreg %idx) { ; GPRIDX-LABEL: dyn_insertelement_v8f32_v_s_s: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_set_gpr_idx_on s3, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v0, s2 -; GPRIDX-NEXT: s_set_gpr_idx_off +; GPRIDX-NEXT: s_cmp_eq_u32 s3, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s3, 1 +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: v_mov_b32_e32 v8, s2 +; GPRIDX-NEXT: s_cmp_eq_u32 s3, 2 +; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s3, 3 +; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s3, 4 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s3, 5 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s3, 6 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s3, 7 +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; 
GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v8f32_v_s_s: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 m0, s3 +; MOVREL-NEXT: s_cmp_eq_u32 s3, 0 ; MOVREL-NEXT: ; implicit-def: $vcc_hi -; MOVREL-NEXT: v_movreld_b32_e32 v0, s2 +; MOVREL-NEXT: s_cselect_b32 s0, 1, 0 +; MOVREL-NEXT: s_and_b32 s0, 1, s0 +; MOVREL-NEXT: s_cmp_eq_u32 s3, 1 +; MOVREL-NEXT: v_cmp_ne_u32_e64 s0, 0, s0 +; MOVREL-NEXT: s_cselect_b32 s1, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0 +; MOVREL-NEXT: s_and_b32 s1, 1, s1 +; MOVREL-NEXT: s_cmp_eq_u32 s3, 2 +; MOVREL-NEXT: v_cmp_ne_u32_e64 s1, 0, s1 +; MOVREL-NEXT: s_cselect_b32 s4, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e64 v1, v1, s2, s1 +; MOVREL-NEXT: s_and_b32 s4, 1, s4 +; MOVREL-NEXT: s_cmp_eq_u32 s3, 3 +; MOVREL-NEXT: v_cmp_ne_u32_e64 s1, 0, s4 +; MOVREL-NEXT: s_cselect_b32 s5, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e64 v2, v2, s2, s1 +; MOVREL-NEXT: s_and_b32 s5, 1, s5 +; MOVREL-NEXT: s_cmp_eq_u32 s3, 4 +; MOVREL-NEXT: v_cmp_ne_u32_e64 s4, 0, s5 +; MOVREL-NEXT: s_cselect_b32 s6, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e64 v3, v3, s2, s4 +; MOVREL-NEXT: s_and_b32 s6, 1, s6 +; MOVREL-NEXT: s_cmp_eq_u32 s3, 5 +; MOVREL-NEXT: v_cmp_ne_u32_e64 s4, 0, s6 +; MOVREL-NEXT: s_cselect_b32 s7, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e64 v4, v4, s2, s4 +; MOVREL-NEXT: s_and_b32 s0, 1, s7 +; MOVREL-NEXT: s_cmp_eq_u32 s3, 6 +; MOVREL-NEXT: v_cmp_ne_u32_e64 s0, 0, s0 +; MOVREL-NEXT: s_cselect_b32 s7, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e64 v5, v5, s2, s0 +; MOVREL-NEXT: s_and_b32 s1, 1, s7 +; MOVREL-NEXT: s_cmp_eq_u32 s3, 7 +; MOVREL-NEXT: v_cmp_ne_u32_e64 s0, 0, s1 +; MOVREL-NEXT: s_cselect_b32 s3, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e64 v6, v6, s2, s0 +; MOVREL-NEXT: s_and_b32 s3, 1, s3 +; MOVREL-NEXT: v_cmp_ne_u32_e64 s0, 0, s3 +; MOVREL-NEXT: v_cndmask_b32_e64 v7, v7, s2, s0 ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <8 x float> %vec, float %val, i32 %idx @@ -349,92 +519,55 @@ define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_s_v_v(<8 x float> inreg %vec, float %val, i32 %idx) { ; GPRIDX-LABEL: dyn_insertelement_v8f32_s_v_v: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b32 s1, s3 -; GPRIDX-NEXT: s_mov_b32 s3, s5 -; GPRIDX-NEXT: s_mov_b32 s5, s7 -; GPRIDX-NEXT: s_mov_b32 s7, s9 -; GPRIDX-NEXT: s_mov_b32 s0, s2 -; GPRIDX-NEXT: s_mov_b32 s2, s4 -; GPRIDX-NEXT: s_mov_b32 s4, s6 -; GPRIDX-NEXT: s_mov_b32 s6, s8 -; GPRIDX-NEXT: v_mov_b32_e32 v17, s7 -; GPRIDX-NEXT: v_mov_b32_e32 v8, v0 -; GPRIDX-NEXT: v_mov_b32_e32 v9, v1 -; GPRIDX-NEXT: v_mov_b32_e32 v16, s6 -; GPRIDX-NEXT: v_mov_b32_e32 v15, s5 -; GPRIDX-NEXT: v_mov_b32_e32 v14, s4 -; GPRIDX-NEXT: v_mov_b32_e32 v13, s3 -; GPRIDX-NEXT: v_mov_b32_e32 v12, s2 -; GPRIDX-NEXT: v_mov_b32_e32 v11, s1 -; GPRIDX-NEXT: v_mov_b32_e32 v10, s0 -; GPRIDX-NEXT: s_mov_b64 s[0:1], exec -; GPRIDX-NEXT: BB6_1: ; =>This Inner Loop Header: Depth=1 -; GPRIDX-NEXT: v_readfirstlane_b32 s2, v9 -; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, s2, v9 -; GPRIDX-NEXT: s_set_gpr_idx_on s2, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v0, v10 -; GPRIDX-NEXT: v_mov_b32_e32 v1, v11 -; GPRIDX-NEXT: v_mov_b32_e32 v2, v12 -; GPRIDX-NEXT: v_mov_b32_e32 v3, v13 -; GPRIDX-NEXT: v_mov_b32_e32 v4, v14 -; GPRIDX-NEXT: v_mov_b32_e32 v5, v15 -; GPRIDX-NEXT: v_mov_b32_e32 v6, v16 -; GPRIDX-NEXT: v_mov_b32_e32 v7, v17 +; GPRIDX-NEXT: 
v_mov_b32_e32 v2, s2 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v8, v2, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v2, s3 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v9, v2, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v2, s4 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v3, s5 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v4, s6 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v5, s7 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v6, s8 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v0, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 7, v1 +; GPRIDX-NEXT: v_mov_b32_e32 v7, s9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v0, vcc ; GPRIDX-NEXT: v_mov_b32_e32 v0, v8 -; GPRIDX-NEXT: s_set_gpr_idx_off -; GPRIDX-NEXT: s_and_saveexec_b64 vcc, vcc -; GPRIDX-NEXT: s_xor_b64 exec, exec, vcc -; GPRIDX-NEXT: s_cbranch_execnz BB6_1 -; GPRIDX-NEXT: ; %bb.2: -; GPRIDX-NEXT: s_mov_b64 exec, s[0:1] +; GPRIDX-NEXT: v_mov_b32_e32 v1, v9 ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v8f32_s_v_v: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s1, s3 -; MOVREL-NEXT: s_mov_b32 s3, s5 -; MOVREL-NEXT: s_mov_b32 s5, s7 -; MOVREL-NEXT: s_mov_b32 s7, s9 -; MOVREL-NEXT: s_mov_b32 s0, s2 -; MOVREL-NEXT: s_mov_b32 s2, s4 -; MOVREL-NEXT: s_mov_b32 s4, s6 -; MOVREL-NEXT: s_mov_b32 s6, s8 -; MOVREL-NEXT: v_mov_b32_e32 v17, s7 -; MOVREL-NEXT: v_mov_b32_e32 v15, s5 -; MOVREL-NEXT: v_mov_b32_e32 v13, s3 -; MOVREL-NEXT: v_mov_b32_e32 v14, s4 -; MOVREL-NEXT: v_mov_b32_e32 v16, s6 -; MOVREL-NEXT: v_mov_b32_e32 v12, s2 -; MOVREL-NEXT: v_mov_b32_e32 v11, s1 -; MOVREL-NEXT: v_mov_b32_e32 v10, s0 -; MOVREL-NEXT: s_mov_b32 s0, exec_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 ; MOVREL-NEXT: ; implicit-def: $vcc_hi -; MOVREL-NEXT: BB6_1: ; =>This Inner Loop Header: Depth=1 -; MOVREL-NEXT: v_readfirstlane_b32 s1, v1 -; MOVREL-NEXT: v_mov_b32_e32 v2, v10 -; MOVREL-NEXT: v_mov_b32_e32 v3, v11 -; MOVREL-NEXT: v_mov_b32_e32 v4, v12 -; MOVREL-NEXT: v_mov_b32_e32 v5, v13 -; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, s1, v1 -; MOVREL-NEXT: s_mov_b32 m0, s1 -; MOVREL-NEXT: v_mov_b32_e32 v6, v14 -; MOVREL-NEXT: v_mov_b32_e32 v7, v15 -; MOVREL-NEXT: v_mov_b32_e32 v8, v16 -; MOVREL-NEXT: v_mov_b32_e32 v9, v17 -; MOVREL-NEXT: v_movreld_b32_e32 v2, v0 -; MOVREL-NEXT: s_and_saveexec_b32 vcc_lo, vcc_lo -; MOVREL-NEXT: s_xor_b32 exec_lo, exec_lo, vcc_lo -; MOVREL-NEXT: s_cbranch_execnz BB6_1 -; MOVREL-NEXT: ; %bb.2: -; MOVREL-NEXT: s_mov_b32 exec_lo, s0 -; MOVREL-NEXT: v_mov_b32_e32 v0, v2 -; MOVREL-NEXT: v_mov_b32_e32 v1, v3 -; MOVREL-NEXT: v_mov_b32_e32 v2, v4 -; MOVREL-NEXT: v_mov_b32_e32 v3, v5 -; MOVREL-NEXT: v_mov_b32_e32 v4, v6 -; MOVREL-NEXT: v_mov_b32_e32 v5, v7 -; MOVREL-NEXT: v_mov_b32_e32 v6, v8 -; MOVREL-NEXT: v_mov_b32_e32 v7, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v8, s2, v0, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1 +; MOVREL-NEXT: v_cndmask_b32_e32 v9, s3, v0, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v1 +; MOVREL-NEXT: v_cndmask_b32_e32 v2, s4, v0, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v1 +; MOVREL-NEXT: v_cndmask_b32_e32 v3, s5, v0, vcc_lo +; MOVREL-NEXT: 
v_cmp_eq_u32_e32 vcc_lo, 4, v1 +; MOVREL-NEXT: v_cndmask_b32_e32 v4, s6, v0, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v1 +; MOVREL-NEXT: v_cndmask_b32_e32 v5, s7, v0, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v1 +; MOVREL-NEXT: v_cndmask_b32_e32 v6, s8, v0, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v1 +; MOVREL-NEXT: v_mov_b32_e32 v1, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v7, s9, v0, vcc_lo +; MOVREL-NEXT: v_mov_b32_e32 v0, v8 ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <8 x float> %vec, float %val, i32 %idx @@ -444,66 +577,44 @@ define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_v_s_v(<8 x float> %vec, float inreg %val, i32 %idx) { ; GPRIDX-LABEL: dyn_insertelement_v8f32_v_s_v: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b64 s[0:1], exec -; GPRIDX-NEXT: BB7_1: ; =>This Inner Loop Header: Depth=1 -; GPRIDX-NEXT: v_readfirstlane_b32 s3, v8 -; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, s3, v8 -; GPRIDX-NEXT: s_set_gpr_idx_on s3, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v16, v7 -; GPRIDX-NEXT: v_mov_b32_e32 v15, v6 -; GPRIDX-NEXT: v_mov_b32_e32 v14, v5 -; GPRIDX-NEXT: v_mov_b32_e32 v13, v4 -; GPRIDX-NEXT: v_mov_b32_e32 v12, v3 -; GPRIDX-NEXT: v_mov_b32_e32 v11, v2 -; GPRIDX-NEXT: v_mov_b32_e32 v10, v1 -; GPRIDX-NEXT: v_mov_b32_e32 v9, v0 ; GPRIDX-NEXT: v_mov_b32_e32 v9, s2 -; GPRIDX-NEXT: s_set_gpr_idx_off -; GPRIDX-NEXT: s_and_saveexec_b64 vcc, vcc -; GPRIDX-NEXT: s_xor_b64 exec, exec, vcc -; GPRIDX-NEXT: s_cbranch_execnz BB7_1 -; GPRIDX-NEXT: ; %bb.2: -; GPRIDX-NEXT: s_mov_b64 exec, s[0:1] -; GPRIDX-NEXT: v_mov_b32_e32 v0, v9 -; GPRIDX-NEXT: v_mov_b32_e32 v1, v10 -; GPRIDX-NEXT: v_mov_b32_e32 v2, v11 -; GPRIDX-NEXT: v_mov_b32_e32 v3, v12 -; GPRIDX-NEXT: v_mov_b32_e32 v4, v13 -; GPRIDX-NEXT: v_mov_b32_e32 v5, v14 -; GPRIDX-NEXT: v_mov_b32_e32 v6, v15 -; GPRIDX-NEXT: v_mov_b32_e32 v7, v16 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8 +; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v9, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8 +; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v8 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v8 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v8 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v8 +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v8 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v9, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 7, v8 +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v9, vcc ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v8f32_v_s_v: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s0, exec_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v8 ; MOVREL-NEXT: ; implicit-def: $vcc_hi -; MOVREL-NEXT: BB7_1: ; =>This Inner Loop Header: Depth=1 -; MOVREL-NEXT: v_readfirstlane_b32 s1, v8 -; MOVREL-NEXT: v_mov_b32_e32 v16, v7 -; MOVREL-NEXT: v_mov_b32_e32 v9, v0 -; MOVREL-NEXT: v_mov_b32_e32 v15, v6 -; MOVREL-NEXT: v_mov_b32_e32 v14, v5 -; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, s1, v8 -; MOVREL-NEXT: s_mov_b32 m0, s1 -; MOVREL-NEXT: v_mov_b32_e32 v13, v4 -; MOVREL-NEXT: v_mov_b32_e32 v12, v3 -; MOVREL-NEXT: v_mov_b32_e32 v11, v2 -; MOVREL-NEXT: v_mov_b32_e32 v10, v1 -; MOVREL-NEXT: v_movreld_b32_e32 v9, s2 -; MOVREL-NEXT: s_and_saveexec_b32 vcc_lo, vcc_lo -; MOVREL-NEXT: s_xor_b32 exec_lo, exec_lo, vcc_lo -; MOVREL-NEXT: s_cbranch_execnz BB7_1 -; 
MOVREL-NEXT: ; %bb.2: -; MOVREL-NEXT: s_mov_b32 exec_lo, s0 -; MOVREL-NEXT: v_mov_b32_e32 v0, v9 -; MOVREL-NEXT: v_mov_b32_e32 v1, v10 -; MOVREL-NEXT: v_mov_b32_e32 v2, v11 -; MOVREL-NEXT: v_mov_b32_e32 v3, v12 -; MOVREL-NEXT: v_mov_b32_e32 v4, v13 -; MOVREL-NEXT: v_mov_b32_e32 v5, v14 -; MOVREL-NEXT: v_mov_b32_e32 v6, v15 -; MOVREL-NEXT: v_mov_b32_e32 v7, v16 +; MOVREL-NEXT: v_cndmask_b32_e64 v0, v0, s2, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v8 +; MOVREL-NEXT: v_cndmask_b32_e64 v1, v1, s2, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v8 +; MOVREL-NEXT: v_cndmask_b32_e64 v2, v2, s2, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v8 +; MOVREL-NEXT: v_cndmask_b32_e64 v3, v3, s2, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v8 +; MOVREL-NEXT: v_cndmask_b32_e64 v4, v4, s2, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v8 +; MOVREL-NEXT: v_cndmask_b32_e64 v5, v5, s2, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v8 +; MOVREL-NEXT: v_cndmask_b32_e64 v6, v6, s2, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v8 +; MOVREL-NEXT: v_cndmask_b32_e64 v7, v7, s2, vcc_lo ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <8 x float> %vec, float %val, i32 %idx @@ -513,16 +624,91 @@ define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_v_v_s(<8 x float> %vec, float %val, i32 inreg %idx) { ; GPRIDX-LABEL: dyn_insertelement_v8f32_v_v_s: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_set_gpr_idx_on s2, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v0, v8 -; GPRIDX-NEXT: s_set_gpr_idx_off +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 1 +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 2 +; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 3 +; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 4 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 5 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 6 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 7 +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v8f32_v_v_s: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 m0, s2 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 0 ; MOVREL-NEXT: ; implicit-def: $vcc_hi -; MOVREL-NEXT: v_movreld_b32_e32 v0, v8 +; MOVREL-NEXT: s_cselect_b32 s0, 1, 0 +; MOVREL-NEXT: s_and_b32 s0, 1, s0 +; MOVREL-NEXT: 
s_cmp_eq_u32 s2, 1 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 +; MOVREL-NEXT: s_cselect_b32 s1, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc_lo +; MOVREL-NEXT: s_and_b32 s1, 1, s1 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 2 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1 +; MOVREL-NEXT: s_cselect_b32 s3, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo +; MOVREL-NEXT: s_and_b32 s3, 1, s3 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 3 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s3 +; MOVREL-NEXT: s_cselect_b32 s4, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo +; MOVREL-NEXT: s_and_b32 s4, 1, s4 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 4 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s4 +; MOVREL-NEXT: s_cselect_b32 s5, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo +; MOVREL-NEXT: s_and_b32 s5, 1, s5 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 5 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s5 +; MOVREL-NEXT: s_cselect_b32 s6, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo +; MOVREL-NEXT: s_and_b32 s0, 1, s6 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 6 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 +; MOVREL-NEXT: s_cselect_b32 s6, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc_lo +; MOVREL-NEXT: s_and_b32 s1, 1, s6 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 7 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1 +; MOVREL-NEXT: s_cselect_b32 s2, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc_lo +; MOVREL-NEXT: s_and_b32 s2, 1, s2 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s2 +; MOVREL-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc_lo ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <8 x float> %vec, float %val, i32 %idx @@ -532,16 +718,91 @@ define amdgpu_ps <8 x float> @dyn_insertelement_v8p3i8_v_v_s(<8 x i8 addrspace(3)*> %vec, i8 addrspace(3)* %val, i32 inreg %idx) { ; GPRIDX-LABEL: dyn_insertelement_v8p3i8_v_v_s: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_set_gpr_idx_on s2, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v0, v8 -; GPRIDX-NEXT: s_set_gpr_idx_off +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 1 +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 2 +; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 3 +; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 4 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 5 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 6 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 7 +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v8, 
vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v8p3i8_v_v_s: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 m0, s2 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 0 ; MOVREL-NEXT: ; implicit-def: $vcc_hi -; MOVREL-NEXT: v_movreld_b32_e32 v0, v8 +; MOVREL-NEXT: s_cselect_b32 s0, 1, 0 +; MOVREL-NEXT: s_and_b32 s0, 1, s0 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 1 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 +; MOVREL-NEXT: s_cselect_b32 s1, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc_lo +; MOVREL-NEXT: s_and_b32 s1, 1, s1 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 2 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1 +; MOVREL-NEXT: s_cselect_b32 s3, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo +; MOVREL-NEXT: s_and_b32 s3, 1, s3 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 3 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s3 +; MOVREL-NEXT: s_cselect_b32 s4, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo +; MOVREL-NEXT: s_and_b32 s4, 1, s4 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 4 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s4 +; MOVREL-NEXT: s_cselect_b32 s5, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo +; MOVREL-NEXT: s_and_b32 s5, 1, s5 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 5 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s5 +; MOVREL-NEXT: s_cselect_b32 s6, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo +; MOVREL-NEXT: s_and_b32 s0, 1, s6 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 6 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 +; MOVREL-NEXT: s_cselect_b32 s6, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc_lo +; MOVREL-NEXT: s_and_b32 s1, 1, s6 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 7 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1 +; MOVREL-NEXT: s_cselect_b32 s2, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc_lo +; MOVREL-NEXT: s_and_b32 s2, 1, s2 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s2 +; MOVREL-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc_lo ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <8 x i8 addrspace(3)*> %vec, i8 addrspace(3)* %val, i32 %idx @@ -553,66 +814,43 @@ define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_v_v_v(<8 x float> %vec, float %val, i32 %idx) { ; GPRIDX-LABEL: dyn_insertelement_v8f32_v_v_v: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b64 s[0:1], exec -; GPRIDX-NEXT: BB10_1: ; =>This Inner Loop Header: Depth=1 -; GPRIDX-NEXT: v_readfirstlane_b32 s2, v9 -; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, s2, v9 -; GPRIDX-NEXT: s_set_gpr_idx_on s2, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v17, v7 -; GPRIDX-NEXT: v_mov_b32_e32 v16, v6 -; GPRIDX-NEXT: v_mov_b32_e32 v15, v5 -; GPRIDX-NEXT: v_mov_b32_e32 v14, v4 -; GPRIDX-NEXT: v_mov_b32_e32 v13, v3 -; GPRIDX-NEXT: v_mov_b32_e32 v12, v2 -; GPRIDX-NEXT: v_mov_b32_e32 v11, v1 -; GPRIDX-NEXT: v_mov_b32_e32 v10, v0 -; GPRIDX-NEXT: v_mov_b32_e32 v10, v8 -; GPRIDX-NEXT: s_set_gpr_idx_off -; GPRIDX-NEXT: s_and_saveexec_b64 vcc, vcc -; GPRIDX-NEXT: s_xor_b64 exec, exec, vcc -; GPRIDX-NEXT: s_cbranch_execnz BB10_1 -; GPRIDX-NEXT: ; %bb.2: -; GPRIDX-NEXT: s_mov_b64 exec, s[0:1] -; GPRIDX-NEXT: v_mov_b32_e32 v0, v10 -; GPRIDX-NEXT: v_mov_b32_e32 v1, v11 -; GPRIDX-NEXT: v_mov_b32_e32 v2, v12 -; GPRIDX-NEXT: v_mov_b32_e32 v3, v13 -; GPRIDX-NEXT: v_mov_b32_e32 v4, v14 -; GPRIDX-NEXT: v_mov_b32_e32 v5, v15 -; GPRIDX-NEXT: v_mov_b32_e32 v6, v16 -; GPRIDX-NEXT: v_mov_b32_e32 v7, v17 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc 
+; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 7, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v8f32_v_v_v: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s0, exec_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9 ; MOVREL-NEXT: ; implicit-def: $vcc_hi -; MOVREL-NEXT: BB10_1: ; =>This Inner Loop Header: Depth=1 -; MOVREL-NEXT: v_readfirstlane_b32 s1, v9 -; MOVREL-NEXT: v_mov_b32_e32 v17, v7 -; MOVREL-NEXT: v_mov_b32_e32 v10, v0 -; MOVREL-NEXT: v_mov_b32_e32 v16, v6 -; MOVREL-NEXT: v_mov_b32_e32 v15, v5 -; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, s1, v9 -; MOVREL-NEXT: s_mov_b32 m0, s1 -; MOVREL-NEXT: v_mov_b32_e32 v14, v4 -; MOVREL-NEXT: v_mov_b32_e32 v13, v3 -; MOVREL-NEXT: v_mov_b32_e32 v12, v2 -; MOVREL-NEXT: v_mov_b32_e32 v11, v1 -; MOVREL-NEXT: v_movreld_b32_e32 v10, v8 -; MOVREL-NEXT: s_and_saveexec_b32 vcc_lo, vcc_lo -; MOVREL-NEXT: s_xor_b32 exec_lo, exec_lo, vcc_lo -; MOVREL-NEXT: s_cbranch_execnz BB10_1 -; MOVREL-NEXT: ; %bb.2: -; MOVREL-NEXT: s_mov_b32 exec_lo, s0 -; MOVREL-NEXT: v_mov_b32_e32 v0, v10 -; MOVREL-NEXT: v_mov_b32_e32 v1, v11 -; MOVREL-NEXT: v_mov_b32_e32 v2, v12 -; MOVREL-NEXT: v_mov_b32_e32 v3, v13 -; MOVREL-NEXT: v_mov_b32_e32 v4, v14 -; MOVREL-NEXT: v_mov_b32_e32 v5, v15 -; MOVREL-NEXT: v_mov_b32_e32 v6, v16 -; MOVREL-NEXT: v_mov_b32_e32 v7, v17 +; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc_lo ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <8 x float> %vec, float %val, i32 %idx @@ -1543,22 +1781,41 @@ define amdgpu_ps <3 x i32> @dyn_insertelement_v3i32_s_s_s(<3 x i32> inreg %vec, i32 inreg %val, i32 inreg %idx) { ; GPRIDX-LABEL: dyn_insertelement_v3i32_s_s_s: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b32 s0, s2 -; GPRIDX-NEXT: s_mov_b32 s1, s3 -; GPRIDX-NEXT: s_mov_b32 s2, s4 -; GPRIDX-NEXT: s_mov_b32 m0, s6 -; GPRIDX-NEXT: s_nop 0 -; GPRIDX-NEXT: s_movreld_b32 s0, s5 +; GPRIDX-NEXT: s_cmp_eq_u32 s6, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, s0, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s0, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, s5, s2 +; GPRIDX-NEXT: s_cmp_eq_u32 s6, 1 +; GPRIDX-NEXT: s_cselect_b32 s1, 1, 0 +; GPRIDX-NEXT: s_and_b32 s1, s1, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s1, 0 +; 
GPRIDX-NEXT: s_cselect_b32 s1, s5, s3 +; GPRIDX-NEXT: s_cmp_eq_u32 s6, 2 +; GPRIDX-NEXT: s_cselect_b32 s2, 1, 0 +; GPRIDX-NEXT: s_and_b32 s2, s2, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s2, 0 +; GPRIDX-NEXT: s_cselect_b32 s2, s5, s4 ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v3i32_s_s_s: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s0, s2 -; MOVREL-NEXT: s_mov_b32 m0, s6 -; MOVREL-NEXT: s_mov_b32 s1, s3 -; MOVREL-NEXT: s_mov_b32 s2, s4 +; MOVREL-NEXT: s_cmp_eq_u32 s6, 0 ; MOVREL-NEXT: ; implicit-def: $vcc_hi -; MOVREL-NEXT: s_movreld_b32 s0, s5 +; MOVREL-NEXT: s_cselect_b32 s0, 1, 0 +; MOVREL-NEXT: s_and_b32 s0, s0, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s0, 0 +; MOVREL-NEXT: s_cselect_b32 s0, s5, s2 +; MOVREL-NEXT: s_cmp_eq_u32 s6, 1 +; MOVREL-NEXT: s_cselect_b32 s1, 1, 0 +; MOVREL-NEXT: s_and_b32 s1, s1, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s1, 0 +; MOVREL-NEXT: s_cselect_b32 s1, s5, s3 +; MOVREL-NEXT: s_cmp_eq_u32 s6, 2 +; MOVREL-NEXT: s_cselect_b32 s2, 1, 0 +; MOVREL-NEXT: s_and_b32 s2, s2, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s2, 0 +; MOVREL-NEXT: s_cselect_b32 s2, s5, s4 ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <3 x i32> %vec, i32 %val, i32 %idx @@ -1568,16 +1825,41 @@ define amdgpu_ps <3 x float> @dyn_insertelement_v3i32_v_v_s(<3 x float> %vec, float %val, i32 inreg %idx) { ; GPRIDX-LABEL: dyn_insertelement_v3i32_v_v_s: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_set_gpr_idx_on s2, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v0, v3 -; GPRIDX-NEXT: s_set_gpr_idx_off +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 1 +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 2 +; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v3i32_v_v_s: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 m0, s2 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 0 ; MOVREL-NEXT: ; implicit-def: $vcc_hi -; MOVREL-NEXT: v_movreld_b32_e32 v0, v3 +; MOVREL-NEXT: s_cselect_b32 s0, 1, 0 +; MOVREL-NEXT: s_and_b32 s0, 1, s0 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 1 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 +; MOVREL-NEXT: s_cselect_b32 s1, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo +; MOVREL-NEXT: s_and_b32 s1, 1, s1 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 2 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1 +; MOVREL-NEXT: s_cselect_b32 s2, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo +; MOVREL-NEXT: s_and_b32 s2, 1, s2 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s2 +; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <3 x float> %vec, float %val, i32 %idx @@ -1587,26 +1869,61 @@ define amdgpu_ps <5 x i32> @dyn_insertelement_v5i32_s_s_s(<5 x i32> inreg %vec, i32 inreg %val, i32 inreg %idx) { ; GPRIDX-LABEL: dyn_insertelement_v5i32_s_s_s: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b32 s0, s2 -; GPRIDX-NEXT: s_mov_b32 s1, s3 -; GPRIDX-NEXT: s_mov_b32 s2, s4 -; GPRIDX-NEXT: s_mov_b32 s3, s5 -; GPRIDX-NEXT: s_mov_b32 s4, s6 -; GPRIDX-NEXT: s_mov_b32 m0, 
s8 -; GPRIDX-NEXT: s_nop 0 -; GPRIDX-NEXT: s_movreld_b32 s0, s7 +; GPRIDX-NEXT: s_cmp_eq_u32 s8, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, s0, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s0, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, s7, s2 +; GPRIDX-NEXT: s_cmp_eq_u32 s8, 1 +; GPRIDX-NEXT: s_cselect_b32 s1, 1, 0 +; GPRIDX-NEXT: s_and_b32 s1, s1, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s1, 0 +; GPRIDX-NEXT: s_cselect_b32 s1, s7, s3 +; GPRIDX-NEXT: s_cmp_eq_u32 s8, 2 +; GPRIDX-NEXT: s_cselect_b32 s2, 1, 0 +; GPRIDX-NEXT: s_and_b32 s2, s2, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s2, 0 +; GPRIDX-NEXT: s_cselect_b32 s2, s7, s4 +; GPRIDX-NEXT: s_cmp_eq_u32 s8, 3 +; GPRIDX-NEXT: s_cselect_b32 s3, 1, 0 +; GPRIDX-NEXT: s_and_b32 s3, s3, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s3, 0 +; GPRIDX-NEXT: s_cselect_b32 s3, s7, s5 +; GPRIDX-NEXT: s_cmp_eq_u32 s8, 4 +; GPRIDX-NEXT: s_cselect_b32 s4, 1, 0 +; GPRIDX-NEXT: s_and_b32 s4, s4, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s4, 0 +; GPRIDX-NEXT: s_cselect_b32 s4, s7, s6 ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v5i32_s_s_s: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s0, s2 -; MOVREL-NEXT: s_mov_b32 m0, s8 -; MOVREL-NEXT: s_mov_b32 s1, s3 -; MOVREL-NEXT: s_mov_b32 s2, s4 -; MOVREL-NEXT: s_mov_b32 s3, s5 -; MOVREL-NEXT: s_mov_b32 s4, s6 -; MOVREL-NEXT: s_movreld_b32 s0, s7 +; MOVREL-NEXT: s_cmp_eq_u32 s8, 0 ; MOVREL-NEXT: ; implicit-def: $vcc_hi +; MOVREL-NEXT: s_cselect_b32 s0, 1, 0 +; MOVREL-NEXT: s_and_b32 s0, s0, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s0, 0 +; MOVREL-NEXT: s_cselect_b32 s0, s7, s2 +; MOVREL-NEXT: s_cmp_eq_u32 s8, 1 +; MOVREL-NEXT: s_cselect_b32 s1, 1, 0 +; MOVREL-NEXT: s_and_b32 s1, s1, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s1, 0 +; MOVREL-NEXT: s_cselect_b32 s1, s7, s3 +; MOVREL-NEXT: s_cmp_eq_u32 s8, 2 +; MOVREL-NEXT: s_cselect_b32 s2, 1, 0 +; MOVREL-NEXT: s_and_b32 s2, s2, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s2, 0 +; MOVREL-NEXT: s_cselect_b32 s2, s7, s4 +; MOVREL-NEXT: s_cmp_eq_u32 s8, 3 +; MOVREL-NEXT: s_cselect_b32 s3, 1, 0 +; MOVREL-NEXT: s_and_b32 s3, s3, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s3, 0 +; MOVREL-NEXT: s_cselect_b32 s3, s7, s5 +; MOVREL-NEXT: s_cmp_eq_u32 s8, 4 +; MOVREL-NEXT: s_cselect_b32 s4, 1, 0 +; MOVREL-NEXT: s_and_b32 s4, s4, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s4, 0 +; MOVREL-NEXT: s_cselect_b32 s4, s7, s6 ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <5 x i32> %vec, i32 %val, i32 %idx @@ -1616,16 +1933,61 @@ define amdgpu_ps <5 x float> @dyn_insertelement_v5i32_v_v_s(<5 x float> %vec, float %val, i32 inreg %idx) { ; GPRIDX-LABEL: dyn_insertelement_v5i32_v_v_s: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_set_gpr_idx_on s2, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v0, v5 -; GPRIDX-NEXT: s_set_gpr_idx_off +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 1 +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 2 +; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 3 +; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 4 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 
vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v5i32_v_v_s: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 m0, s2 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 0 ; MOVREL-NEXT: ; implicit-def: $vcc_hi -; MOVREL-NEXT: v_movreld_b32_e32 v0, v5 +; MOVREL-NEXT: s_cselect_b32 s0, 1, 0 +; MOVREL-NEXT: s_and_b32 s0, 1, s0 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 1 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 +; MOVREL-NEXT: s_cselect_b32 s1, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo +; MOVREL-NEXT: s_and_b32 s1, 1, s1 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 2 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1 +; MOVREL-NEXT: s_cselect_b32 s3, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo +; MOVREL-NEXT: s_and_b32 s3, 1, s3 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 3 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s3 +; MOVREL-NEXT: s_cselect_b32 s4, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc_lo +; MOVREL-NEXT: s_and_b32 s4, 1, s4 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 4 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s4 +; MOVREL-NEXT: s_cselect_b32 s2, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo +; MOVREL-NEXT: s_and_b32 s0, 1, s2 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 +; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <5 x float> %vec, float %val, i32 %idx @@ -1737,17 +2099,47 @@ define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_s_s_s_add_1(<8 x float> inreg %vec, float inreg %val, i32 inreg %idx) { ; GPRIDX-LABEL: dyn_insertelement_v8f32_s_s_s_add_1: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b32 s0, s2 -; GPRIDX-NEXT: s_mov_b32 s1, s3 -; GPRIDX-NEXT: s_mov_b32 s2, s4 -; GPRIDX-NEXT: s_mov_b32 s3, s5 -; GPRIDX-NEXT: s_mov_b32 s4, s6 -; GPRIDX-NEXT: s_mov_b32 s5, s7 -; GPRIDX-NEXT: s_mov_b32 s6, s8 -; GPRIDX-NEXT: s_mov_b32 s7, s9 -; GPRIDX-NEXT: s_mov_b32 m0, s11 -; GPRIDX-NEXT: s_nop 0 -; GPRIDX-NEXT: s_movreld_b32 s1, s10 +; GPRIDX-NEXT: s_add_i32 s11, s11, 1 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, s0, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s0, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, s10, s2 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 1 +; GPRIDX-NEXT: s_cselect_b32 s1, 1, 0 +; GPRIDX-NEXT: s_and_b32 s1, s1, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s1, 0 +; GPRIDX-NEXT: s_cselect_b32 s1, s10, s3 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 2 +; GPRIDX-NEXT: s_cselect_b32 s2, 1, 0 +; GPRIDX-NEXT: s_and_b32 s2, s2, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s2, 0 +; GPRIDX-NEXT: s_cselect_b32 s2, s10, s4 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 3 +; GPRIDX-NEXT: s_cselect_b32 s3, 1, 0 +; GPRIDX-NEXT: s_and_b32 s3, s3, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s3, 0 +; GPRIDX-NEXT: s_cselect_b32 s3, s10, s5 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 4 +; GPRIDX-NEXT: s_cselect_b32 s4, 1, 0 +; GPRIDX-NEXT: s_and_b32 s4, s4, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s4, 0 +; GPRIDX-NEXT: s_cselect_b32 s4, s10, s6 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 5 +; GPRIDX-NEXT: s_cselect_b32 s5, 1, 0 +; GPRIDX-NEXT: s_and_b32 s5, s5, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s5, 0 +; GPRIDX-NEXT: s_cselect_b32 s5, s10, s7 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 6 +; GPRIDX-NEXT: s_cselect_b32 s6, 1, 0 +; GPRIDX-NEXT: s_and_b32 s6, s6, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s6, 0 +; GPRIDX-NEXT: 
s_cselect_b32 s6, s10, s8 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 7 +; GPRIDX-NEXT: s_cselect_b32 s7, 1, 0 +; GPRIDX-NEXT: s_and_b32 s7, s7, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s7, 0 +; GPRIDX-NEXT: s_cselect_b32 s7, s10, s9 ; GPRIDX-NEXT: v_mov_b32_e32 v0, s0 ; GPRIDX-NEXT: v_mov_b32_e32 v1, s1 ; GPRIDX-NEXT: v_mov_b32_e32 v2, s2 @@ -1760,25 +2152,56 @@ ; ; MOVREL-LABEL: dyn_insertelement_v8f32_s_s_s_add_1: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s1, s3 -; MOVREL-NEXT: s_mov_b32 m0, s11 -; MOVREL-NEXT: s_mov_b32 s0, s2 -; MOVREL-NEXT: s_mov_b32 s2, s4 -; MOVREL-NEXT: s_mov_b32 s3, s5 -; MOVREL-NEXT: s_mov_b32 s4, s6 -; MOVREL-NEXT: s_mov_b32 s5, s7 -; MOVREL-NEXT: s_mov_b32 s6, s8 -; MOVREL-NEXT: s_mov_b32 s7, s9 -; MOVREL-NEXT: s_movreld_b32 s1, s10 +; MOVREL-NEXT: s_add_i32 s11, s11, 1 +; MOVREL-NEXT: ; implicit-def: $vcc_hi +; MOVREL-NEXT: s_cmp_eq_u32 s11, 0 +; MOVREL-NEXT: s_cselect_b32 s0, 1, 0 +; MOVREL-NEXT: s_and_b32 s0, s0, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s0, 0 +; MOVREL-NEXT: s_cselect_b32 s0, s10, s2 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 1 ; MOVREL-NEXT: v_mov_b32_e32 v0, s0 +; MOVREL-NEXT: s_cselect_b32 s1, 1, 0 +; MOVREL-NEXT: s_and_b32 s1, s1, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s1, 0 +; MOVREL-NEXT: s_cselect_b32 s1, s10, s3 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 2 ; MOVREL-NEXT: v_mov_b32_e32 v1, s1 +; MOVREL-NEXT: s_cselect_b32 s2, 1, 0 +; MOVREL-NEXT: s_and_b32 s2, s2, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s2, 0 +; MOVREL-NEXT: s_cselect_b32 s2, s10, s4 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 3 ; MOVREL-NEXT: v_mov_b32_e32 v2, s2 +; MOVREL-NEXT: s_cselect_b32 s3, 1, 0 +; MOVREL-NEXT: s_and_b32 s3, s3, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s3, 0 +; MOVREL-NEXT: s_cselect_b32 s3, s10, s5 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 4 ; MOVREL-NEXT: v_mov_b32_e32 v3, s3 +; MOVREL-NEXT: s_cselect_b32 s4, 1, 0 +; MOVREL-NEXT: s_and_b32 s4, s4, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s4, 0 +; MOVREL-NEXT: s_cselect_b32 s4, s10, s6 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 5 ; MOVREL-NEXT: v_mov_b32_e32 v4, s4 +; MOVREL-NEXT: s_cselect_b32 s5, 1, 0 +; MOVREL-NEXT: s_and_b32 s5, s5, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s5, 0 +; MOVREL-NEXT: s_cselect_b32 s5, s10, s7 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 6 ; MOVREL-NEXT: v_mov_b32_e32 v5, s5 +; MOVREL-NEXT: s_cselect_b32 s6, 1, 0 +; MOVREL-NEXT: s_and_b32 s6, s6, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s6, 0 +; MOVREL-NEXT: s_cselect_b32 s6, s10, s8 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 7 ; MOVREL-NEXT: v_mov_b32_e32 v6, s6 +; MOVREL-NEXT: s_cselect_b32 s7, 1, 0 +; MOVREL-NEXT: s_and_b32 s7, s7, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s7, 0 +; MOVREL-NEXT: s_cselect_b32 s7, s10, s9 ; MOVREL-NEXT: v_mov_b32_e32 v7, s7 -; MOVREL-NEXT: ; implicit-def: $vcc_hi ; MOVREL-NEXT: ; return to shader part epilog entry: %idx.add = add i32 %idx, 1 @@ -1789,17 +2212,47 @@ define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_s_s_s_add_7(<8 x float> inreg %vec, float inreg %val, i32 inreg %idx) { ; GPRIDX-LABEL: dyn_insertelement_v8f32_s_s_s_add_7: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b32 s0, s2 -; GPRIDX-NEXT: s_mov_b32 s1, s3 -; GPRIDX-NEXT: s_mov_b32 s2, s4 -; GPRIDX-NEXT: s_mov_b32 s3, s5 -; GPRIDX-NEXT: s_mov_b32 s4, s6 -; GPRIDX-NEXT: s_mov_b32 s5, s7 -; GPRIDX-NEXT: s_mov_b32 s6, s8 -; GPRIDX-NEXT: s_mov_b32 s7, s9 -; GPRIDX-NEXT: s_mov_b32 m0, s11 -; GPRIDX-NEXT: s_nop 0 -; GPRIDX-NEXT: s_movreld_b32 s7, s10 +; GPRIDX-NEXT: s_add_i32 s11, s11, 7 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, s0, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s0, 0 +; GPRIDX-NEXT: 
s_cselect_b32 s0, s10, s2 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 1 +; GPRIDX-NEXT: s_cselect_b32 s1, 1, 0 +; GPRIDX-NEXT: s_and_b32 s1, s1, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s1, 0 +; GPRIDX-NEXT: s_cselect_b32 s1, s10, s3 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 2 +; GPRIDX-NEXT: s_cselect_b32 s2, 1, 0 +; GPRIDX-NEXT: s_and_b32 s2, s2, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s2, 0 +; GPRIDX-NEXT: s_cselect_b32 s2, s10, s4 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 3 +; GPRIDX-NEXT: s_cselect_b32 s3, 1, 0 +; GPRIDX-NEXT: s_and_b32 s3, s3, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s3, 0 +; GPRIDX-NEXT: s_cselect_b32 s3, s10, s5 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 4 +; GPRIDX-NEXT: s_cselect_b32 s4, 1, 0 +; GPRIDX-NEXT: s_and_b32 s4, s4, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s4, 0 +; GPRIDX-NEXT: s_cselect_b32 s4, s10, s6 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 5 +; GPRIDX-NEXT: s_cselect_b32 s5, 1, 0 +; GPRIDX-NEXT: s_and_b32 s5, s5, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s5, 0 +; GPRIDX-NEXT: s_cselect_b32 s5, s10, s7 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 6 +; GPRIDX-NEXT: s_cselect_b32 s6, 1, 0 +; GPRIDX-NEXT: s_and_b32 s6, s6, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s6, 0 +; GPRIDX-NEXT: s_cselect_b32 s6, s10, s8 +; GPRIDX-NEXT: s_cmp_eq_u32 s11, 7 +; GPRIDX-NEXT: s_cselect_b32 s7, 1, 0 +; GPRIDX-NEXT: s_and_b32 s7, s7, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s7, 0 +; GPRIDX-NEXT: s_cselect_b32 s7, s10, s9 ; GPRIDX-NEXT: v_mov_b32_e32 v0, s0 ; GPRIDX-NEXT: v_mov_b32_e32 v1, s1 ; GPRIDX-NEXT: v_mov_b32_e32 v2, s2 @@ -1812,25 +2265,56 @@ ; ; MOVREL-LABEL: dyn_insertelement_v8f32_s_s_s_add_7: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s1, s3 -; MOVREL-NEXT: s_mov_b32 s3, s5 -; MOVREL-NEXT: s_mov_b32 s5, s7 -; MOVREL-NEXT: s_mov_b32 s7, s9 -; MOVREL-NEXT: s_mov_b32 m0, s11 -; MOVREL-NEXT: s_mov_b32 s0, s2 -; MOVREL-NEXT: s_mov_b32 s2, s4 -; MOVREL-NEXT: s_mov_b32 s4, s6 -; MOVREL-NEXT: s_mov_b32 s6, s8 -; MOVREL-NEXT: s_movreld_b32 s7, s10 +; MOVREL-NEXT: s_add_i32 s11, s11, 7 +; MOVREL-NEXT: ; implicit-def: $vcc_hi +; MOVREL-NEXT: s_cmp_eq_u32 s11, 0 +; MOVREL-NEXT: s_cselect_b32 s0, 1, 0 +; MOVREL-NEXT: s_and_b32 s0, s0, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s0, 0 +; MOVREL-NEXT: s_cselect_b32 s0, s10, s2 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 1 ; MOVREL-NEXT: v_mov_b32_e32 v0, s0 +; MOVREL-NEXT: s_cselect_b32 s1, 1, 0 +; MOVREL-NEXT: s_and_b32 s1, s1, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s1, 0 +; MOVREL-NEXT: s_cselect_b32 s1, s10, s3 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 2 ; MOVREL-NEXT: v_mov_b32_e32 v1, s1 +; MOVREL-NEXT: s_cselect_b32 s2, 1, 0 +; MOVREL-NEXT: s_and_b32 s2, s2, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s2, 0 +; MOVREL-NEXT: s_cselect_b32 s2, s10, s4 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 3 ; MOVREL-NEXT: v_mov_b32_e32 v2, s2 +; MOVREL-NEXT: s_cselect_b32 s3, 1, 0 +; MOVREL-NEXT: s_and_b32 s3, s3, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s3, 0 +; MOVREL-NEXT: s_cselect_b32 s3, s10, s5 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 4 ; MOVREL-NEXT: v_mov_b32_e32 v3, s3 +; MOVREL-NEXT: s_cselect_b32 s4, 1, 0 +; MOVREL-NEXT: s_and_b32 s4, s4, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s4, 0 +; MOVREL-NEXT: s_cselect_b32 s4, s10, s6 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 5 ; MOVREL-NEXT: v_mov_b32_e32 v4, s4 +; MOVREL-NEXT: s_cselect_b32 s5, 1, 0 +; MOVREL-NEXT: s_and_b32 s5, s5, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s5, 0 +; MOVREL-NEXT: s_cselect_b32 s5, s10, s7 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 6 ; MOVREL-NEXT: v_mov_b32_e32 v5, s5 +; MOVREL-NEXT: s_cselect_b32 s6, 1, 0 +; MOVREL-NEXT: s_and_b32 s6, s6, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s6, 0 +; MOVREL-NEXT: s_cselect_b32 s6, s10, s8 +; MOVREL-NEXT: s_cmp_eq_u32 s11, 7 ; MOVREL-NEXT: 
v_mov_b32_e32 v6, s6 +; MOVREL-NEXT: s_cselect_b32 s7, 1, 0 +; MOVREL-NEXT: s_and_b32 s7, s7, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s7, 0 +; MOVREL-NEXT: s_cselect_b32 s7, s10, s9 ; MOVREL-NEXT: v_mov_b32_e32 v7, s7 -; MOVREL-NEXT: ; implicit-def: $vcc_hi ; MOVREL-NEXT: ; return to shader part epilog entry: %idx.add = add i32 %idx, 7 @@ -1841,66 +2325,45 @@ define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_v_v_v_add_1(<8 x float> %vec, float %val, i32 %idx) { ; GPRIDX-LABEL: dyn_insertelement_v8f32_v_v_v_add_1: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b64 s[0:1], exec -; GPRIDX-NEXT: BB29_1: ; =>This Inner Loop Header: Depth=1 -; GPRIDX-NEXT: v_readfirstlane_b32 s2, v9 -; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, s2, v9 -; GPRIDX-NEXT: s_set_gpr_idx_on s2, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v17, v7 -; GPRIDX-NEXT: v_mov_b32_e32 v16, v6 -; GPRIDX-NEXT: v_mov_b32_e32 v15, v5 -; GPRIDX-NEXT: v_mov_b32_e32 v14, v4 -; GPRIDX-NEXT: v_mov_b32_e32 v13, v3 -; GPRIDX-NEXT: v_mov_b32_e32 v12, v2 -; GPRIDX-NEXT: v_mov_b32_e32 v11, v1 -; GPRIDX-NEXT: v_mov_b32_e32 v10, v0 -; GPRIDX-NEXT: v_mov_b32_e32 v11, v8 -; GPRIDX-NEXT: s_set_gpr_idx_off -; GPRIDX-NEXT: s_and_saveexec_b64 vcc, vcc -; GPRIDX-NEXT: s_xor_b64 exec, exec, vcc -; GPRIDX-NEXT: s_cbranch_execnz BB29_1 -; GPRIDX-NEXT: ; %bb.2: -; GPRIDX-NEXT: s_mov_b64 exec, s[0:1] -; GPRIDX-NEXT: v_mov_b32_e32 v0, v10 -; GPRIDX-NEXT: v_mov_b32_e32 v1, v11 -; GPRIDX-NEXT: v_mov_b32_e32 v2, v12 -; GPRIDX-NEXT: v_mov_b32_e32 v3, v13 -; GPRIDX-NEXT: v_mov_b32_e32 v4, v14 -; GPRIDX-NEXT: v_mov_b32_e32 v5, v15 -; GPRIDX-NEXT: v_mov_b32_e32 v6, v16 -; GPRIDX-NEXT: v_mov_b32_e32 v7, v17 +; GPRIDX-NEXT: v_add_u32_e32 v9, 1, v9 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 7, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v8f32_v_v_v_add_1: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s0, exec_lo +; MOVREL-NEXT: v_add_nc_u32_e32 v9, 1, v9 ; MOVREL-NEXT: ; implicit-def: $vcc_hi -; MOVREL-NEXT: BB29_1: ; =>This Inner Loop Header: Depth=1 -; MOVREL-NEXT: v_readfirstlane_b32 s1, v9 -; MOVREL-NEXT: v_mov_b32_e32 v17, v7 -; MOVREL-NEXT: v_mov_b32_e32 v11, v1 -; MOVREL-NEXT: v_mov_b32_e32 v16, v6 -; MOVREL-NEXT: v_mov_b32_e32 v15, v5 -; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, s1, v9 -; MOVREL-NEXT: s_mov_b32 m0, s1 -; MOVREL-NEXT: v_mov_b32_e32 v14, v4 -; MOVREL-NEXT: v_mov_b32_e32 v13, v3 -; MOVREL-NEXT: v_mov_b32_e32 v12, v2 -; MOVREL-NEXT: v_mov_b32_e32 v10, v0 -; MOVREL-NEXT: v_movreld_b32_e32 v11, v8 -; MOVREL-NEXT: s_and_saveexec_b32 vcc_lo, vcc_lo -; MOVREL-NEXT: s_xor_b32 exec_lo, exec_lo, vcc_lo -; MOVREL-NEXT: s_cbranch_execnz BB29_1 -; MOVREL-NEXT: ; %bb.2: -; MOVREL-NEXT: s_mov_b32 exec_lo, s0 -; MOVREL-NEXT: v_mov_b32_e32 v0, v10 -; MOVREL-NEXT: v_mov_b32_e32 v1, v11 -; MOVREL-NEXT: v_mov_b32_e32 v2, v12 -; MOVREL-NEXT: 
v_mov_b32_e32 v3, v13 -; MOVREL-NEXT: v_mov_b32_e32 v4, v14 -; MOVREL-NEXT: v_mov_b32_e32 v5, v15 -; MOVREL-NEXT: v_mov_b32_e32 v6, v16 -; MOVREL-NEXT: v_mov_b32_e32 v7, v17 +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc_lo ; MOVREL-NEXT: ; return to shader part epilog entry: %idx.add = add i32 %idx, 1 @@ -1911,66 +2374,45 @@ define amdgpu_ps <8 x float> @dyn_insertelement_v8f32_v_v_v_add_7(<8 x float> %vec, float %val, i32 %idx) { ; GPRIDX-LABEL: dyn_insertelement_v8f32_v_v_v_add_7: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b64 s[0:1], exec -; GPRIDX-NEXT: BB30_1: ; =>This Inner Loop Header: Depth=1 -; GPRIDX-NEXT: v_readfirstlane_b32 s2, v9 -; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, s2, v9 -; GPRIDX-NEXT: s_set_gpr_idx_on s2, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v17, v7 -; GPRIDX-NEXT: v_mov_b32_e32 v16, v6 -; GPRIDX-NEXT: v_mov_b32_e32 v15, v5 -; GPRIDX-NEXT: v_mov_b32_e32 v14, v4 -; GPRIDX-NEXT: v_mov_b32_e32 v13, v3 -; GPRIDX-NEXT: v_mov_b32_e32 v12, v2 -; GPRIDX-NEXT: v_mov_b32_e32 v11, v1 -; GPRIDX-NEXT: v_mov_b32_e32 v10, v0 -; GPRIDX-NEXT: v_mov_b32_e32 v17, v8 -; GPRIDX-NEXT: s_set_gpr_idx_off -; GPRIDX-NEXT: s_and_saveexec_b64 vcc, vcc -; GPRIDX-NEXT: s_xor_b64 exec, exec, vcc -; GPRIDX-NEXT: s_cbranch_execnz BB30_1 -; GPRIDX-NEXT: ; %bb.2: -; GPRIDX-NEXT: s_mov_b64 exec, s[0:1] -; GPRIDX-NEXT: v_mov_b32_e32 v0, v10 -; GPRIDX-NEXT: v_mov_b32_e32 v1, v11 -; GPRIDX-NEXT: v_mov_b32_e32 v2, v12 -; GPRIDX-NEXT: v_mov_b32_e32 v3, v13 -; GPRIDX-NEXT: v_mov_b32_e32 v4, v14 -; GPRIDX-NEXT: v_mov_b32_e32 v5, v15 -; GPRIDX-NEXT: v_mov_b32_e32 v6, v16 -; GPRIDX-NEXT: v_mov_b32_e32 v7, v17 +; GPRIDX-NEXT: v_add_u32_e32 v9, 7, v9 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 7, v9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v8f32_v_v_v_add_7: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s0, exec_lo +; MOVREL-NEXT: v_add_nc_u32_e32 v9, 7, v9 ; MOVREL-NEXT: ; implicit-def: $vcc_hi -; MOVREL-NEXT: BB30_1: ; =>This Inner Loop Header: Depth=1 -; MOVREL-NEXT: v_readfirstlane_b32 s1, v9 -; MOVREL-NEXT: v_mov_b32_e32 v17, 
v7 -; MOVREL-NEXT: v_mov_b32_e32 v16, v6 -; MOVREL-NEXT: v_mov_b32_e32 v15, v5 -; MOVREL-NEXT: v_mov_b32_e32 v14, v4 -; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, s1, v9 -; MOVREL-NEXT: s_mov_b32 m0, s1 -; MOVREL-NEXT: v_mov_b32_e32 v13, v3 -; MOVREL-NEXT: v_mov_b32_e32 v12, v2 -; MOVREL-NEXT: v_mov_b32_e32 v11, v1 -; MOVREL-NEXT: v_mov_b32_e32 v10, v0 -; MOVREL-NEXT: v_movreld_b32_e32 v17, v8 -; MOVREL-NEXT: s_and_saveexec_b32 vcc_lo, vcc_lo -; MOVREL-NEXT: s_xor_b32 exec_lo, exec_lo, vcc_lo -; MOVREL-NEXT: s_cbranch_execnz BB30_1 -; MOVREL-NEXT: ; %bb.2: -; MOVREL-NEXT: s_mov_b32 exec_lo, s0 -; MOVREL-NEXT: v_mov_b32_e32 v0, v10 -; MOVREL-NEXT: v_mov_b32_e32 v1, v11 -; MOVREL-NEXT: v_mov_b32_e32 v2, v12 -; MOVREL-NEXT: v_mov_b32_e32 v3, v13 -; MOVREL-NEXT: v_mov_b32_e32 v4, v14 -; MOVREL-NEXT: v_mov_b32_e32 v5, v15 -; MOVREL-NEXT: v_mov_b32_e32 v6, v16 -; MOVREL-NEXT: v_mov_b32_e32 v7, v17 +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v9 +; MOVREL-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc_lo ; MOVREL-NEXT: ; return to shader part epilog entry: %idx.add = add i32 %idx, 7 @@ -3401,30 +3843,81 @@ define amdgpu_ps <7 x i32> @dyn_insertelement_v7i32_s_s_s(<7 x i32> inreg %vec, i32 inreg %val, i32 inreg %idx) { ; GPRIDX-LABEL: dyn_insertelement_v7i32_s_s_s: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b32 s0, s2 -; GPRIDX-NEXT: s_mov_b32 s1, s3 -; GPRIDX-NEXT: s_mov_b32 s2, s4 -; GPRIDX-NEXT: s_mov_b32 s3, s5 -; GPRIDX-NEXT: s_mov_b32 s4, s6 -; GPRIDX-NEXT: s_mov_b32 s5, s7 -; GPRIDX-NEXT: s_mov_b32 s6, s8 -; GPRIDX-NEXT: s_mov_b32 m0, s10 -; GPRIDX-NEXT: s_nop 0 -; GPRIDX-NEXT: s_movreld_b32 s0, s9 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, s0, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s0, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, s9, s2 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 1 +; GPRIDX-NEXT: s_cselect_b32 s1, 1, 0 +; GPRIDX-NEXT: s_and_b32 s1, s1, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s1, 0 +; GPRIDX-NEXT: s_cselect_b32 s1, s9, s3 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 2 +; GPRIDX-NEXT: s_cselect_b32 s2, 1, 0 +; GPRIDX-NEXT: s_and_b32 s2, s2, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s2, 0 +; GPRIDX-NEXT: s_cselect_b32 s2, s9, s4 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 3 +; GPRIDX-NEXT: s_cselect_b32 s3, 1, 0 +; GPRIDX-NEXT: s_and_b32 s3, s3, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s3, 0 +; GPRIDX-NEXT: s_cselect_b32 s3, s9, s5 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 4 +; GPRIDX-NEXT: s_cselect_b32 s4, 1, 0 +; GPRIDX-NEXT: s_and_b32 s4, s4, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s4, 0 +; GPRIDX-NEXT: s_cselect_b32 s4, s9, s6 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 5 +; GPRIDX-NEXT: s_cselect_b32 s5, 1, 0 +; GPRIDX-NEXT: s_and_b32 s5, s5, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s5, 0 +; GPRIDX-NEXT: s_cselect_b32 s5, s9, s7 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 6 +; GPRIDX-NEXT: s_cselect_b32 s6, 1, 0 +; GPRIDX-NEXT: s_and_b32 s6, s6, 1 +; 
GPRIDX-NEXT: s_cmp_lg_u32 s6, 0 +; GPRIDX-NEXT: s_cselect_b32 s6, s9, s8 ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v7i32_s_s_s: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s0, s2 -; MOVREL-NEXT: s_mov_b32 m0, s10 -; MOVREL-NEXT: s_mov_b32 s1, s3 -; MOVREL-NEXT: s_mov_b32 s2, s4 -; MOVREL-NEXT: s_mov_b32 s3, s5 -; MOVREL-NEXT: s_mov_b32 s4, s6 -; MOVREL-NEXT: s_mov_b32 s5, s7 -; MOVREL-NEXT: s_mov_b32 s6, s8 -; MOVREL-NEXT: s_movreld_b32 s0, s9 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 0 ; MOVREL-NEXT: ; implicit-def: $vcc_hi +; MOVREL-NEXT: s_cselect_b32 s0, 1, 0 +; MOVREL-NEXT: s_and_b32 s0, s0, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s0, 0 +; MOVREL-NEXT: s_cselect_b32 s0, s9, s2 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 1 +; MOVREL-NEXT: s_cselect_b32 s1, 1, 0 +; MOVREL-NEXT: s_and_b32 s1, s1, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s1, 0 +; MOVREL-NEXT: s_cselect_b32 s1, s9, s3 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 2 +; MOVREL-NEXT: s_cselect_b32 s2, 1, 0 +; MOVREL-NEXT: s_and_b32 s2, s2, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s2, 0 +; MOVREL-NEXT: s_cselect_b32 s2, s9, s4 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 3 +; MOVREL-NEXT: s_cselect_b32 s3, 1, 0 +; MOVREL-NEXT: s_and_b32 s3, s3, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s3, 0 +; MOVREL-NEXT: s_cselect_b32 s3, s9, s5 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 4 +; MOVREL-NEXT: s_cselect_b32 s4, 1, 0 +; MOVREL-NEXT: s_and_b32 s4, s4, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s4, 0 +; MOVREL-NEXT: s_cselect_b32 s4, s9, s6 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 5 +; MOVREL-NEXT: s_cselect_b32 s5, 1, 0 +; MOVREL-NEXT: s_and_b32 s5, s5, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s5, 0 +; MOVREL-NEXT: s_cselect_b32 s5, s9, s7 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 6 +; MOVREL-NEXT: s_cselect_b32 s6, 1, 0 +; MOVREL-NEXT: s_and_b32 s6, s6, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s6, 0 +; MOVREL-NEXT: s_cselect_b32 s6, s9, s8 ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <7 x i32> %vec, i32 %val, i32 %idx @@ -3434,30 +3927,81 @@ define amdgpu_ps <7 x i8 addrspace(3)*> @dyn_insertelement_v7p3i8_s_s_s(<7 x i8 addrspace(3)*> inreg %vec, i8 addrspace(3)* inreg %val, i32 inreg %idx) { ; GPRIDX-LABEL: dyn_insertelement_v7p3i8_s_s_s: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b32 s0, s2 -; GPRIDX-NEXT: s_mov_b32 s1, s3 -; GPRIDX-NEXT: s_mov_b32 s2, s4 -; GPRIDX-NEXT: s_mov_b32 s3, s5 -; GPRIDX-NEXT: s_mov_b32 s4, s6 -; GPRIDX-NEXT: s_mov_b32 s5, s7 -; GPRIDX-NEXT: s_mov_b32 s6, s8 -; GPRIDX-NEXT: s_mov_b32 m0, s10 -; GPRIDX-NEXT: s_nop 0 -; GPRIDX-NEXT: s_movreld_b32 s0, s9 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, s0, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s0, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, s9, s2 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 1 +; GPRIDX-NEXT: s_cselect_b32 s1, 1, 0 +; GPRIDX-NEXT: s_and_b32 s1, s1, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s1, 0 +; GPRIDX-NEXT: s_cselect_b32 s1, s9, s3 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 2 +; GPRIDX-NEXT: s_cselect_b32 s2, 1, 0 +; GPRIDX-NEXT: s_and_b32 s2, s2, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s2, 0 +; GPRIDX-NEXT: s_cselect_b32 s2, s9, s4 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 3 +; GPRIDX-NEXT: s_cselect_b32 s3, 1, 0 +; GPRIDX-NEXT: s_and_b32 s3, s3, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s3, 0 +; GPRIDX-NEXT: s_cselect_b32 s3, s9, s5 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 4 +; GPRIDX-NEXT: s_cselect_b32 s4, 1, 0 +; GPRIDX-NEXT: s_and_b32 s4, s4, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s4, 0 +; GPRIDX-NEXT: s_cselect_b32 s4, s9, s6 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 5 +; GPRIDX-NEXT: s_cselect_b32 s5, 1, 0 +; 
GPRIDX-NEXT: s_and_b32 s5, s5, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s5, 0 +; GPRIDX-NEXT: s_cselect_b32 s5, s9, s7 +; GPRIDX-NEXT: s_cmp_eq_u32 s10, 6 +; GPRIDX-NEXT: s_cselect_b32 s6, 1, 0 +; GPRIDX-NEXT: s_and_b32 s6, s6, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s6, 0 +; GPRIDX-NEXT: s_cselect_b32 s6, s9, s8 ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v7p3i8_s_s_s: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s0, s2 -; MOVREL-NEXT: s_mov_b32 m0, s10 -; MOVREL-NEXT: s_mov_b32 s1, s3 -; MOVREL-NEXT: s_mov_b32 s2, s4 -; MOVREL-NEXT: s_mov_b32 s3, s5 -; MOVREL-NEXT: s_mov_b32 s4, s6 -; MOVREL-NEXT: s_mov_b32 s5, s7 -; MOVREL-NEXT: s_mov_b32 s6, s8 -; MOVREL-NEXT: s_movreld_b32 s0, s9 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 0 ; MOVREL-NEXT: ; implicit-def: $vcc_hi +; MOVREL-NEXT: s_cselect_b32 s0, 1, 0 +; MOVREL-NEXT: s_and_b32 s0, s0, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s0, 0 +; MOVREL-NEXT: s_cselect_b32 s0, s9, s2 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 1 +; MOVREL-NEXT: s_cselect_b32 s1, 1, 0 +; MOVREL-NEXT: s_and_b32 s1, s1, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s1, 0 +; MOVREL-NEXT: s_cselect_b32 s1, s9, s3 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 2 +; MOVREL-NEXT: s_cselect_b32 s2, 1, 0 +; MOVREL-NEXT: s_and_b32 s2, s2, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s2, 0 +; MOVREL-NEXT: s_cselect_b32 s2, s9, s4 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 3 +; MOVREL-NEXT: s_cselect_b32 s3, 1, 0 +; MOVREL-NEXT: s_and_b32 s3, s3, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s3, 0 +; MOVREL-NEXT: s_cselect_b32 s3, s9, s5 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 4 +; MOVREL-NEXT: s_cselect_b32 s4, 1, 0 +; MOVREL-NEXT: s_and_b32 s4, s4, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s4, 0 +; MOVREL-NEXT: s_cselect_b32 s4, s9, s6 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 5 +; MOVREL-NEXT: s_cselect_b32 s5, 1, 0 +; MOVREL-NEXT: s_and_b32 s5, s5, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s5, 0 +; MOVREL-NEXT: s_cselect_b32 s5, s9, s7 +; MOVREL-NEXT: s_cmp_eq_u32 s10, 6 +; MOVREL-NEXT: s_cselect_b32 s6, 1, 0 +; MOVREL-NEXT: s_and_b32 s6, s6, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s6, 0 +; MOVREL-NEXT: s_cselect_b32 s6, s9, s8 ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <7 x i8 addrspace(3)*> %vec, i8 addrspace(3)* %val, i32 %idx @@ -3467,48 +4011,90 @@ define amdgpu_ps <7 x float> @dyn_insertelement_v7f32_s_v_s(<7 x float> inreg %vec, float %val, i32 inreg %idx) { ; GPRIDX-LABEL: dyn_insertelement_v7f32_s_v_s: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b32 s0, s2 -; GPRIDX-NEXT: s_mov_b32 s1, s3 -; GPRIDX-NEXT: s_mov_b32 s2, s4 -; GPRIDX-NEXT: s_mov_b32 s3, s5 -; GPRIDX-NEXT: s_mov_b32 s4, s6 -; GPRIDX-NEXT: s_mov_b32 s5, s7 -; GPRIDX-NEXT: s_mov_b32 s6, s8 -; GPRIDX-NEXT: v_mov_b32_e32 v8, v0 -; GPRIDX-NEXT: v_mov_b32_e32 v0, s0 -; GPRIDX-NEXT: v_mov_b32_e32 v1, s1 -; GPRIDX-NEXT: v_mov_b32_e32 v2, s2 -; GPRIDX-NEXT: v_mov_b32_e32 v3, s3 -; GPRIDX-NEXT: v_mov_b32_e32 v4, s4 -; GPRIDX-NEXT: v_mov_b32_e32 v5, s5 -; GPRIDX-NEXT: v_mov_b32_e32 v6, s6 -; GPRIDX-NEXT: v_mov_b32_e32 v7, s7 -; GPRIDX-NEXT: s_set_gpr_idx_on s9, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v0, v8 -; GPRIDX-NEXT: s_set_gpr_idx_off +; GPRIDX-NEXT: s_cmp_eq_u32 s9, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s9, 1 +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: v_mov_b32_e32 v1, s2 +; GPRIDX-NEXT: s_cmp_eq_u32 s9, 2 +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v1, v0, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: 
s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: v_mov_b32_e32 v1, s3 +; GPRIDX-NEXT: s_cmp_eq_u32 s9, 3 +; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v0, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: v_mov_b32_e32 v2, s4 +; GPRIDX-NEXT: s_cmp_eq_u32 s9, 4 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v0, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: v_mov_b32_e32 v3, s5 +; GPRIDX-NEXT: s_cmp_eq_u32 s9, 5 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v0, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: v_mov_b32_e32 v4, s6 +; GPRIDX-NEXT: s_cmp_eq_u32 s9, 6 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v0, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: v_mov_b32_e32 v5, s7 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v6, s8 +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v0, v7 ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v7f32_s_v_s: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s0, s2 -; MOVREL-NEXT: s_mov_b32 s1, s3 -; MOVREL-NEXT: s_mov_b32 s2, s4 -; MOVREL-NEXT: s_mov_b32 s3, s5 -; MOVREL-NEXT: s_mov_b32 s4, s6 -; MOVREL-NEXT: s_mov_b32 s5, s7 -; MOVREL-NEXT: s_mov_b32 s6, s8 -; MOVREL-NEXT: v_mov_b32_e32 v8, v0 -; MOVREL-NEXT: v_mov_b32_e32 v0, s0 -; MOVREL-NEXT: s_mov_b32 m0, s9 -; MOVREL-NEXT: v_mov_b32_e32 v1, s1 -; MOVREL-NEXT: v_mov_b32_e32 v2, s2 -; MOVREL-NEXT: v_mov_b32_e32 v3, s3 -; MOVREL-NEXT: v_mov_b32_e32 v4, s4 -; MOVREL-NEXT: v_mov_b32_e32 v5, s5 -; MOVREL-NEXT: v_mov_b32_e32 v6, s6 -; MOVREL-NEXT: v_mov_b32_e32 v7, s7 -; MOVREL-NEXT: v_movreld_b32_e32 v0, v8 +; MOVREL-NEXT: s_cmp_eq_u32 s9, 0 ; MOVREL-NEXT: ; implicit-def: $vcc_hi +; MOVREL-NEXT: s_cselect_b32 s0, 1, 0 +; MOVREL-NEXT: s_and_b32 s0, 1, s0 +; MOVREL-NEXT: s_cmp_eq_u32 s9, 1 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 +; MOVREL-NEXT: s_cselect_b32 s1, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v7, s2, v0, vcc_lo +; MOVREL-NEXT: s_and_b32 s1, 1, s1 +; MOVREL-NEXT: s_cmp_eq_u32 s9, 2 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1 +; MOVREL-NEXT: s_cselect_b32 s10, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v1, s3, v0, vcc_lo +; MOVREL-NEXT: s_and_b32 s10, 1, s10 +; MOVREL-NEXT: s_cmp_eq_u32 s9, 3 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s10 +; MOVREL-NEXT: s_cselect_b32 s11, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v2, s4, v0, vcc_lo +; MOVREL-NEXT: s_and_b32 s11, 1, s11 +; MOVREL-NEXT: s_cmp_eq_u32 s9, 4 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s11 +; MOVREL-NEXT: s_cselect_b32 s12, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v3, s5, v0, vcc_lo +; MOVREL-NEXT: s_and_b32 s12, 1, s12 +; MOVREL-NEXT: s_cmp_eq_u32 s9, 5 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s12 +; MOVREL-NEXT: s_cselect_b32 s13, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v4, s6, v0, vcc_lo +; MOVREL-NEXT: s_and_b32 s0, 1, s13 +; MOVREL-NEXT: s_cmp_eq_u32 s9, 6 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 +; MOVREL-NEXT: s_cselect_b32 s1, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v5, s7, v0, vcc_lo +; MOVREL-NEXT: s_and_b32 s1, 1, s1 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1 +; MOVREL-NEXT: v_cndmask_b32_e32 v6, s8, v0, 
vcc_lo +; MOVREL-NEXT: v_mov_b32_e32 v0, v7 ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <7 x float> %vec, float %val, i32 %idx @@ -3518,89 +4104,50 @@ define amdgpu_ps <7 x float> @dyn_insertelement_v7f32_s_v_v(<7 x float> inreg %vec, float %val, i32 %idx) { ; GPRIDX-LABEL: dyn_insertelement_v7f32_s_v_v: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b32 s0, s2 -; GPRIDX-NEXT: s_mov_b32 s1, s3 -; GPRIDX-NEXT: s_mov_b32 s2, s4 -; GPRIDX-NEXT: s_mov_b32 s3, s5 -; GPRIDX-NEXT: s_mov_b32 s4, s6 -; GPRIDX-NEXT: s_mov_b32 s5, s7 -; GPRIDX-NEXT: s_mov_b32 s6, s8 -; GPRIDX-NEXT: v_mov_b32_e32 v17, s7 -; GPRIDX-NEXT: v_mov_b32_e32 v8, v0 -; GPRIDX-NEXT: v_mov_b32_e32 v9, v1 -; GPRIDX-NEXT: v_mov_b32_e32 v16, s6 -; GPRIDX-NEXT: v_mov_b32_e32 v15, s5 -; GPRIDX-NEXT: v_mov_b32_e32 v14, s4 -; GPRIDX-NEXT: v_mov_b32_e32 v13, s3 -; GPRIDX-NEXT: v_mov_b32_e32 v12, s2 -; GPRIDX-NEXT: v_mov_b32_e32 v11, s1 -; GPRIDX-NEXT: v_mov_b32_e32 v10, s0 -; GPRIDX-NEXT: s_mov_b64 s[0:1], exec -; GPRIDX-NEXT: BB46_1: ; =>This Inner Loop Header: Depth=1 -; GPRIDX-NEXT: v_readfirstlane_b32 s2, v9 -; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, s2, v9 -; GPRIDX-NEXT: s_set_gpr_idx_on s2, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v0, v10 -; GPRIDX-NEXT: v_mov_b32_e32 v1, v11 -; GPRIDX-NEXT: v_mov_b32_e32 v2, v12 -; GPRIDX-NEXT: v_mov_b32_e32 v3, v13 -; GPRIDX-NEXT: v_mov_b32_e32 v4, v14 -; GPRIDX-NEXT: v_mov_b32_e32 v5, v15 -; GPRIDX-NEXT: v_mov_b32_e32 v6, v16 -; GPRIDX-NEXT: v_mov_b32_e32 v7, v17 +; GPRIDX-NEXT: v_mov_b32_e32 v2, s2 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v8, v2, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v2, s3 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v2, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v2, s4 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v3, s5 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v4, s6 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v5, s7 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v0, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v1 +; GPRIDX-NEXT: v_mov_b32_e32 v6, s8 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v0, vcc ; GPRIDX-NEXT: v_mov_b32_e32 v0, v8 -; GPRIDX-NEXT: s_set_gpr_idx_off -; GPRIDX-NEXT: s_and_saveexec_b64 vcc, vcc -; GPRIDX-NEXT: s_xor_b64 exec, exec, vcc -; GPRIDX-NEXT: s_cbranch_execnz BB46_1 -; GPRIDX-NEXT: ; %bb.2: -; GPRIDX-NEXT: s_mov_b64 exec, s[0:1] +; GPRIDX-NEXT: v_mov_b32_e32 v1, v7 ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v7f32_s_v_v: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s0, s2 -; MOVREL-NEXT: s_mov_b32 s1, s3 -; MOVREL-NEXT: s_mov_b32 s2, s4 -; MOVREL-NEXT: s_mov_b32 s3, s5 -; MOVREL-NEXT: s_mov_b32 s4, s6 -; MOVREL-NEXT: s_mov_b32 s5, s7 -; MOVREL-NEXT: s_mov_b32 s6, s8 -; MOVREL-NEXT: v_mov_b32_e32 v17, s7 -; MOVREL-NEXT: v_mov_b32_e32 v13, s3 -; MOVREL-NEXT: v_mov_b32_e32 v14, s4 -; MOVREL-NEXT: v_mov_b32_e32 v15, s5 -; MOVREL-NEXT: v_mov_b32_e32 v16, s6 -; MOVREL-NEXT: v_mov_b32_e32 v12, s2 -; MOVREL-NEXT: v_mov_b32_e32 v11, s1 -; MOVREL-NEXT: v_mov_b32_e32 v10, s0 -; MOVREL-NEXT: s_mov_b32 s0, exec_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 ; MOVREL-NEXT: ; implicit-def: $vcc_hi -; MOVREL-NEXT: BB46_1: ; =>This Inner 
Loop Header: Depth=1 -; MOVREL-NEXT: v_readfirstlane_b32 s1, v1 -; MOVREL-NEXT: v_mov_b32_e32 v2, v10 -; MOVREL-NEXT: v_mov_b32_e32 v3, v11 -; MOVREL-NEXT: v_mov_b32_e32 v4, v12 -; MOVREL-NEXT: v_mov_b32_e32 v5, v13 -; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, s1, v1 -; MOVREL-NEXT: s_mov_b32 m0, s1 -; MOVREL-NEXT: v_mov_b32_e32 v6, v14 -; MOVREL-NEXT: v_mov_b32_e32 v7, v15 -; MOVREL-NEXT: v_mov_b32_e32 v8, v16 -; MOVREL-NEXT: v_mov_b32_e32 v9, v17 -; MOVREL-NEXT: v_movreld_b32_e32 v2, v0 -; MOVREL-NEXT: s_and_saveexec_b32 vcc_lo, vcc_lo -; MOVREL-NEXT: s_xor_b32 exec_lo, exec_lo, vcc_lo -; MOVREL-NEXT: s_cbranch_execnz BB46_1 -; MOVREL-NEXT: ; %bb.2: -; MOVREL-NEXT: s_mov_b32 exec_lo, s0 -; MOVREL-NEXT: v_mov_b32_e32 v0, v2 -; MOVREL-NEXT: v_mov_b32_e32 v1, v3 -; MOVREL-NEXT: v_mov_b32_e32 v2, v4 -; MOVREL-NEXT: v_mov_b32_e32 v3, v5 -; MOVREL-NEXT: v_mov_b32_e32 v4, v6 -; MOVREL-NEXT: v_mov_b32_e32 v5, v7 -; MOVREL-NEXT: v_mov_b32_e32 v6, v8 +; MOVREL-NEXT: v_cndmask_b32_e32 v8, s2, v0, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1 +; MOVREL-NEXT: v_cndmask_b32_e32 v7, s3, v0, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v1 +; MOVREL-NEXT: v_cndmask_b32_e32 v2, s4, v0, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v1 +; MOVREL-NEXT: v_cndmask_b32_e32 v3, s5, v0, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v1 +; MOVREL-NEXT: v_cndmask_b32_e32 v4, s6, v0, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v1 +; MOVREL-NEXT: v_cndmask_b32_e32 v5, s7, v0, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v1 +; MOVREL-NEXT: v_mov_b32_e32 v1, v7 +; MOVREL-NEXT: v_cndmask_b32_e32 v6, s8, v0, vcc_lo +; MOVREL-NEXT: v_mov_b32_e32 v0, v8 ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <7 x float> %vec, float %val, i32 %idx @@ -3610,16 +4157,81 @@ define amdgpu_ps <7 x float> @dyn_insertelement_v7f32_v_v_s(<7 x float> %vec, float %val, i32 inreg %idx) { ; GPRIDX-LABEL: dyn_insertelement_v7f32_v_v_s: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_set_gpr_idx_on s2, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v0, v7 -; GPRIDX-NEXT: s_set_gpr_idx_off +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 1 +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 2 +; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 3 +; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 4 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 5 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 6 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc ; 
GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v7f32_v_v_s: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 m0, s2 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 0 ; MOVREL-NEXT: ; implicit-def: $vcc_hi -; MOVREL-NEXT: v_movreld_b32_e32 v0, v7 +; MOVREL-NEXT: s_cselect_b32 s0, 1, 0 +; MOVREL-NEXT: s_and_b32 s0, 1, s0 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 1 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 +; MOVREL-NEXT: s_cselect_b32 s1, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc_lo +; MOVREL-NEXT: s_and_b32 s1, 1, s1 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 2 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1 +; MOVREL-NEXT: s_cselect_b32 s3, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo +; MOVREL-NEXT: s_and_b32 s3, 1, s3 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 3 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s3 +; MOVREL-NEXT: s_cselect_b32 s4, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo +; MOVREL-NEXT: s_and_b32 s4, 1, s4 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 4 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s4 +; MOVREL-NEXT: s_cselect_b32 s5, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo +; MOVREL-NEXT: s_and_b32 s5, 1, s5 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 5 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s5 +; MOVREL-NEXT: s_cselect_b32 s6, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc_lo +; MOVREL-NEXT: s_and_b32 s0, 1, s6 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 6 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 +; MOVREL-NEXT: s_cselect_b32 s1, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc_lo +; MOVREL-NEXT: s_and_b32 s1, 1, s1 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1 +; MOVREL-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc_lo ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <7 x float> %vec, float %val, i32 %idx @@ -3629,64 +4241,39 @@ define amdgpu_ps <7 x float> @dyn_insertelement_v7f32_v_v_v(<7 x float> %vec, float %val, i32 %idx) { ; GPRIDX-LABEL: dyn_insertelement_v7f32_v_v_v: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b64 s[0:1], exec -; GPRIDX-NEXT: BB48_1: ; =>This Inner Loop Header: Depth=1 -; GPRIDX-NEXT: v_readfirstlane_b32 s2, v8 -; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, s2, v8 -; GPRIDX-NEXT: s_set_gpr_idx_on s2, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v16, v7 -; GPRIDX-NEXT: v_mov_b32_e32 v15, v6 -; GPRIDX-NEXT: v_mov_b32_e32 v14, v5 -; GPRIDX-NEXT: v_mov_b32_e32 v13, v4 -; GPRIDX-NEXT: v_mov_b32_e32 v12, v3 -; GPRIDX-NEXT: v_mov_b32_e32 v11, v2 -; GPRIDX-NEXT: v_mov_b32_e32 v10, v1 -; GPRIDX-NEXT: v_mov_b32_e32 v9, v0 -; GPRIDX-NEXT: v_mov_b32_e32 v9, v7 -; GPRIDX-NEXT: s_set_gpr_idx_off -; GPRIDX-NEXT: s_and_saveexec_b64 vcc, vcc -; GPRIDX-NEXT: s_xor_b64 exec, exec, vcc -; GPRIDX-NEXT: s_cbranch_execnz BB48_1 -; GPRIDX-NEXT: ; %bb.2: -; GPRIDX-NEXT: s_mov_b64 exec, s[0:1] -; GPRIDX-NEXT: v_mov_b32_e32 v0, v9 -; GPRIDX-NEXT: v_mov_b32_e32 v1, v10 -; GPRIDX-NEXT: v_mov_b32_e32 v2, v11 -; GPRIDX-NEXT: v_mov_b32_e32 v3, v12 -; GPRIDX-NEXT: v_mov_b32_e32 v4, v13 -; GPRIDX-NEXT: v_mov_b32_e32 v5, v14 -; GPRIDX-NEXT: v_mov_b32_e32 v6, v15 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8 +; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8 +; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v8 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v8 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v8 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v7, 
vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v8 +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v8 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v7f32_v_v_v: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s0, exec_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v8 ; MOVREL-NEXT: ; implicit-def: $vcc_hi -; MOVREL-NEXT: BB48_1: ; =>This Inner Loop Header: Depth=1 -; MOVREL-NEXT: v_readfirstlane_b32 s1, v8 -; MOVREL-NEXT: v_mov_b32_e32 v16, v7 -; MOVREL-NEXT: v_mov_b32_e32 v9, v0 -; MOVREL-NEXT: v_mov_b32_e32 v15, v6 -; MOVREL-NEXT: v_mov_b32_e32 v14, v5 -; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, s1, v8 -; MOVREL-NEXT: s_mov_b32 m0, s1 -; MOVREL-NEXT: v_mov_b32_e32 v13, v4 -; MOVREL-NEXT: v_mov_b32_e32 v12, v3 -; MOVREL-NEXT: v_mov_b32_e32 v11, v2 -; MOVREL-NEXT: v_mov_b32_e32 v10, v1 -; MOVREL-NEXT: v_movreld_b32_e32 v9, v7 -; MOVREL-NEXT: s_and_saveexec_b32 vcc_lo, vcc_lo -; MOVREL-NEXT: s_xor_b32 exec_lo, exec_lo, vcc_lo -; MOVREL-NEXT: s_cbranch_execnz BB48_1 -; MOVREL-NEXT: ; %bb.2: -; MOVREL-NEXT: s_mov_b32 exec_lo, s0 -; MOVREL-NEXT: v_mov_b32_e32 v0, v9 -; MOVREL-NEXT: v_mov_b32_e32 v1, v10 -; MOVREL-NEXT: v_mov_b32_e32 v2, v11 -; MOVREL-NEXT: v_mov_b32_e32 v3, v12 -; MOVREL-NEXT: v_mov_b32_e32 v4, v13 -; MOVREL-NEXT: v_mov_b32_e32 v5, v14 -; MOVREL-NEXT: v_mov_b32_e32 v6, v15 +; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v8 +; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v8 +; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v8 +; MOVREL-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v8 +; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v8 +; MOVREL-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v8 +; MOVREL-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc_lo ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <7 x float> %vec, float %val, i32 %idx @@ -4169,36 +4756,61 @@ define amdgpu_ps <5 x double> @dyn_insertelement_v5f64_s_s_s(<5 x double> inreg %vec, double inreg %val, i32 inreg %idx) { ; GPRIDX-LABEL: dyn_insertelement_v5f64_s_s_s: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b32 s0, s2 -; GPRIDX-NEXT: s_mov_b32 s1, s3 -; GPRIDX-NEXT: s_mov_b32 s2, s4 -; GPRIDX-NEXT: s_mov_b32 s3, s5 -; GPRIDX-NEXT: s_mov_b32 s4, s6 -; GPRIDX-NEXT: s_mov_b32 s5, s7 -; GPRIDX-NEXT: s_mov_b32 s6, s8 -; GPRIDX-NEXT: s_mov_b32 s7, s9 -; GPRIDX-NEXT: s_mov_b32 s8, s10 -; GPRIDX-NEXT: s_mov_b32 s9, s11 -; GPRIDX-NEXT: s_mov_b32 m0, s14 -; GPRIDX-NEXT: s_nop 0 -; GPRIDX-NEXT: s_movreld_b64 s[0:1], s[12:13] +; GPRIDX-NEXT: s_cmp_eq_u32 s14, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, s0, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s0, 0 +; GPRIDX-NEXT: s_cselect_b64 s[0:1], s[12:13], s[2:3] +; GPRIDX-NEXT: s_cmp_eq_u32 s14, 1 +; GPRIDX-NEXT: s_cselect_b32 s2, 1, 0 +; GPRIDX-NEXT: s_and_b32 s2, s2, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s2, 0 +; GPRIDX-NEXT: s_cselect_b64 s[2:3], s[12:13], s[4:5] +; GPRIDX-NEXT: s_cmp_eq_u32 s14, 2 +; GPRIDX-NEXT: s_cselect_b32 s4, 1, 0 +; GPRIDX-NEXT: s_and_b32 s4, s4, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s4, 0 +; GPRIDX-NEXT: s_cselect_b64 s[4:5], s[12:13], s[6:7] +; GPRIDX-NEXT: s_cmp_eq_u32 s14, 3 +; GPRIDX-NEXT: s_cselect_b32 s6, 1, 0 +; 
GPRIDX-NEXT: s_and_b32 s6, s6, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s6, 0 +; GPRIDX-NEXT: s_cselect_b64 s[6:7], s[12:13], s[8:9] +; GPRIDX-NEXT: s_cmp_eq_u32 s14, 4 +; GPRIDX-NEXT: s_cselect_b32 s8, 1, 0 +; GPRIDX-NEXT: s_and_b32 s8, s8, 1 +; GPRIDX-NEXT: s_cmp_lg_u32 s8, 0 +; GPRIDX-NEXT: s_cselect_b64 s[8:9], s[12:13], s[10:11] ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v5f64_s_s_s: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s0, s2 -; MOVREL-NEXT: s_mov_b32 s1, s3 -; MOVREL-NEXT: s_mov_b32 m0, s14 -; MOVREL-NEXT: s_mov_b32 s2, s4 -; MOVREL-NEXT: s_mov_b32 s3, s5 -; MOVREL-NEXT: s_mov_b32 s4, s6 -; MOVREL-NEXT: s_mov_b32 s5, s7 -; MOVREL-NEXT: s_mov_b32 s6, s8 -; MOVREL-NEXT: s_mov_b32 s7, s9 -; MOVREL-NEXT: s_mov_b32 s8, s10 -; MOVREL-NEXT: s_mov_b32 s9, s11 -; MOVREL-NEXT: s_movreld_b64 s[0:1], s[12:13] +; MOVREL-NEXT: s_cmp_eq_u32 s14, 0 ; MOVREL-NEXT: ; implicit-def: $vcc_hi +; MOVREL-NEXT: s_cselect_b32 s0, 1, 0 +; MOVREL-NEXT: s_and_b32 s0, s0, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s0, 0 +; MOVREL-NEXT: s_cselect_b64 s[0:1], s[12:13], s[2:3] +; MOVREL-NEXT: s_cmp_eq_u32 s14, 1 +; MOVREL-NEXT: s_cselect_b32 s2, 1, 0 +; MOVREL-NEXT: s_and_b32 s2, s2, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s2, 0 +; MOVREL-NEXT: s_cselect_b64 s[2:3], s[12:13], s[4:5] +; MOVREL-NEXT: s_cmp_eq_u32 s14, 2 +; MOVREL-NEXT: s_cselect_b32 s4, 1, 0 +; MOVREL-NEXT: s_and_b32 s4, s4, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s4, 0 +; MOVREL-NEXT: s_cselect_b64 s[4:5], s[12:13], s[6:7] +; MOVREL-NEXT: s_cmp_eq_u32 s14, 3 +; MOVREL-NEXT: s_cselect_b32 s6, 1, 0 +; MOVREL-NEXT: s_and_b32 s6, s6, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s6, 0 +; MOVREL-NEXT: s_cselect_b64 s[6:7], s[12:13], s[8:9] +; MOVREL-NEXT: s_cmp_eq_u32 s14, 4 +; MOVREL-NEXT: s_cselect_b32 s8, 1, 0 +; MOVREL-NEXT: s_and_b32 s8, s8, 1 +; MOVREL-NEXT: s_cmp_lg_u32 s8, 0 +; MOVREL-NEXT: s_cselect_b64 s[8:9], s[12:13], s[10:11] ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <5 x double> %vec, double %val, i32 %idx @@ -4208,91 +4820,103 @@ define amdgpu_ps <5 x double> @dyn_insertelement_v5f64_s_v_s(<5 x double> inreg %vec, double %val, i32 inreg %idx) { ; GPRIDX-LABEL: dyn_insertelement_v5f64_s_v_s: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b32 s0, s2 -; GPRIDX-NEXT: s_mov_b32 s1, s3 -; GPRIDX-NEXT: s_mov_b32 s2, s4 -; GPRIDX-NEXT: s_mov_b32 s3, s5 -; GPRIDX-NEXT: s_mov_b32 s4, s6 -; GPRIDX-NEXT: s_mov_b32 s5, s7 -; GPRIDX-NEXT: s_mov_b32 s6, s8 -; GPRIDX-NEXT: s_mov_b32 s7, s9 -; GPRIDX-NEXT: s_mov_b32 s8, s10 -; GPRIDX-NEXT: s_mov_b32 s9, s11 -; GPRIDX-NEXT: v_mov_b32_e32 v17, s15 -; GPRIDX-NEXT: v_mov_b32_e32 v16, s14 -; GPRIDX-NEXT: v_mov_b32_e32 v15, s13 -; GPRIDX-NEXT: v_mov_b32_e32 v14, s12 -; GPRIDX-NEXT: v_mov_b32_e32 v13, s11 -; GPRIDX-NEXT: v_mov_b32_e32 v12, s10 -; GPRIDX-NEXT: v_mov_b32_e32 v11, s9 -; GPRIDX-NEXT: v_mov_b32_e32 v10, s8 -; GPRIDX-NEXT: v_mov_b32_e32 v9, s7 -; GPRIDX-NEXT: v_mov_b32_e32 v8, s6 -; GPRIDX-NEXT: v_mov_b32_e32 v7, s5 -; GPRIDX-NEXT: v_mov_b32_e32 v6, s4 -; GPRIDX-NEXT: v_mov_b32_e32 v5, s3 -; GPRIDX-NEXT: v_mov_b32_e32 v4, s2 -; GPRIDX-NEXT: v_mov_b32_e32 v3, s1 -; GPRIDX-NEXT: v_mov_b32_e32 v2, s0 -; GPRIDX-NEXT: s_lshl_b32 s0, s12, 1 -; GPRIDX-NEXT: s_set_gpr_idx_on s0, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v2, v0 -; GPRIDX-NEXT: v_mov_b32_e32 v3, v1 -; GPRIDX-NEXT: s_set_gpr_idx_off +; GPRIDX-NEXT: s_cmp_eq_u32 s12, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s12, 1 +; GPRIDX-NEXT: s_cselect_b32 
s1, 1, 0 +; GPRIDX-NEXT: v_mov_b32_e32 v2, s2 +; GPRIDX-NEXT: s_and_b32 s2, 1, s1 +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s12, 2 +; GPRIDX-NEXT: v_mov_b32_e32 v3, s3 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v0, vcc +; GPRIDX-NEXT: s_cselect_b32 s3, 1, 0 ; GPRIDX-NEXT: v_readfirstlane_b32 s0, v2 -; GPRIDX-NEXT: v_readfirstlane_b32 s1, v3 -; GPRIDX-NEXT: v_readfirstlane_b32 s2, v4 -; GPRIDX-NEXT: v_readfirstlane_b32 s3, v5 -; GPRIDX-NEXT: v_readfirstlane_b32 s4, v6 -; GPRIDX-NEXT: v_readfirstlane_b32 s5, v7 -; GPRIDX-NEXT: v_readfirstlane_b32 s6, v8 -; GPRIDX-NEXT: v_readfirstlane_b32 s7, v9 -; GPRIDX-NEXT: v_readfirstlane_b32 s8, v10 -; GPRIDX-NEXT: v_readfirstlane_b32 s9, v11 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v3, v1, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v3, s4 +; GPRIDX-NEXT: s_and_b32 s4, 1, s3 +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s2 +; GPRIDX-NEXT: s_cmp_eq_u32 s12, 3 +; GPRIDX-NEXT: v_mov_b32_e32 v4, s5 +; GPRIDX-NEXT: v_readfirstlane_b32 s1, v2 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v3, v0, vcc +; GPRIDX-NEXT: s_cselect_b32 s5, 1, 0 +; GPRIDX-NEXT: v_readfirstlane_b32 s2, v2 +; GPRIDX-NEXT: v_mov_b32_e32 v2, s6 +; GPRIDX-NEXT: s_and_b32 s6, 1, s5 +; GPRIDX-NEXT: s_cmp_eq_u32 s12, 4 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v4, v1, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v5, s7 +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4 +; GPRIDX-NEXT: s_cselect_b32 s7, 1, 0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v2, v0, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v1, vcc +; GPRIDX-NEXT: v_readfirstlane_b32 s3, v3 +; GPRIDX-NEXT: v_mov_b32_e32 v2, s8 +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s6 +; GPRIDX-NEXT: v_mov_b32_e32 v3, s9 +; GPRIDX-NEXT: s_and_b32 s8, 1, s7 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v2, v0, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v3, v1, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v2, s10 +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s8 +; GPRIDX-NEXT: v_mov_b32_e32 v3, s11 +; GPRIDX-NEXT: v_cndmask_b32_e32 v8, v2, v0, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v9, v3, v1, vcc +; GPRIDX-NEXT: v_readfirstlane_b32 s4, v4 +; GPRIDX-NEXT: v_readfirstlane_b32 s5, v5 +; GPRIDX-NEXT: v_readfirstlane_b32 s6, v6 +; GPRIDX-NEXT: v_readfirstlane_b32 s7, v7 +; GPRIDX-NEXT: v_readfirstlane_b32 s8, v8 +; GPRIDX-NEXT: v_readfirstlane_b32 s9, v9 ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v5f64_s_v_s: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s0, s2 -; MOVREL-NEXT: s_mov_b32 s1, s3 -; MOVREL-NEXT: s_mov_b32 s2, s4 -; MOVREL-NEXT: s_mov_b32 s3, s5 -; MOVREL-NEXT: s_mov_b32 s4, s6 -; MOVREL-NEXT: s_mov_b32 s5, s7 -; MOVREL-NEXT: s_mov_b32 s6, s8 -; MOVREL-NEXT: s_mov_b32 s7, s9 -; MOVREL-NEXT: s_mov_b32 s8, s10 -; MOVREL-NEXT: s_mov_b32 s9, s11 -; MOVREL-NEXT: v_mov_b32_e32 v17, s15 -; MOVREL-NEXT: v_mov_b32_e32 v2, s0 -; MOVREL-NEXT: s_lshl_b32 m0, s12, 1 -; MOVREL-NEXT: v_mov_b32_e32 v16, s14 -; MOVREL-NEXT: v_mov_b32_e32 v15, s13 -; MOVREL-NEXT: v_mov_b32_e32 v14, s12 -; MOVREL-NEXT: v_mov_b32_e32 v13, s11 -; MOVREL-NEXT: v_mov_b32_e32 v12, s10 -; MOVREL-NEXT: v_mov_b32_e32 v11, s9 -; MOVREL-NEXT: v_mov_b32_e32 v10, s8 -; MOVREL-NEXT: v_mov_b32_e32 v9, s7 -; MOVREL-NEXT: v_mov_b32_e32 v8, s6 -; MOVREL-NEXT: v_mov_b32_e32 v7, s5 -; MOVREL-NEXT: v_mov_b32_e32 v6, s4 -; MOVREL-NEXT: v_mov_b32_e32 v5, s3 -; MOVREL-NEXT: v_mov_b32_e32 v4, s2 -; MOVREL-NEXT: v_mov_b32_e32 v3, s1 -; MOVREL-NEXT: v_movreld_b32_e32 v2, v0 -; MOVREL-NEXT: v_movreld_b32_e32 v3, v1 +; MOVREL-NEXT: s_cmp_eq_u32 s12, 0 +; MOVREL-NEXT: s_mov_b32 s14, s10 +; 
MOVREL-NEXT: s_mov_b32 s15, s11 +; MOVREL-NEXT: ; implicit-def: $vcc_hi +; MOVREL-NEXT: s_cselect_b32 s0, 1, 0 +; MOVREL-NEXT: s_and_b32 s0, 1, s0 +; MOVREL-NEXT: s_cmp_eq_u32 s12, 1 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 +; MOVREL-NEXT: s_cselect_b32 s1, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v2, s2, v0, vcc_lo +; MOVREL-NEXT: s_and_b32 s1, 1, s1 +; MOVREL-NEXT: s_cmp_eq_u32 s12, 2 +; MOVREL-NEXT: v_cndmask_b32_e32 v3, s3, v1, vcc_lo ; MOVREL-NEXT: v_readfirstlane_b32 s0, v2 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1 +; MOVREL-NEXT: s_cselect_b32 s2, 1, 0 ; MOVREL-NEXT: v_readfirstlane_b32 s1, v3 -; MOVREL-NEXT: v_readfirstlane_b32 s2, v4 -; MOVREL-NEXT: v_readfirstlane_b32 s3, v5 -; MOVREL-NEXT: v_readfirstlane_b32 s4, v6 -; MOVREL-NEXT: v_readfirstlane_b32 s5, v7 -; MOVREL-NEXT: v_readfirstlane_b32 s6, v8 -; MOVREL-NEXT: v_readfirstlane_b32 s7, v9 -; MOVREL-NEXT: v_readfirstlane_b32 s8, v10 -; MOVREL-NEXT: v_readfirstlane_b32 s9, v11 -; MOVREL-NEXT: ; implicit-def: $vcc_hi +; MOVREL-NEXT: v_cndmask_b32_e32 v2, s4, v0, vcc_lo +; MOVREL-NEXT: s_and_b32 s3, 1, s2 +; MOVREL-NEXT: s_cmp_eq_u32 s12, 3 +; MOVREL-NEXT: v_cndmask_b32_e32 v3, s5, v1, vcc_lo +; MOVREL-NEXT: v_readfirstlane_b32 s2, v2 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s3 +; MOVREL-NEXT: s_cselect_b32 s4, 1, 0 +; MOVREL-NEXT: v_readfirstlane_b32 s3, v3 +; MOVREL-NEXT: v_cndmask_b32_e32 v4, s6, v0, vcc_lo +; MOVREL-NEXT: s_and_b32 s5, 1, s4 +; MOVREL-NEXT: s_cmp_eq_u32 s12, 4 +; MOVREL-NEXT: v_cndmask_b32_e32 v5, s7, v1, vcc_lo +; MOVREL-NEXT: v_readfirstlane_b32 s4, v4 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s5 +; MOVREL-NEXT: s_cselect_b32 s6, 1, 0 +; MOVREL-NEXT: v_readfirstlane_b32 s5, v5 +; MOVREL-NEXT: v_cndmask_b32_e32 v7, s9, v1, vcc_lo +; MOVREL-NEXT: s_and_b32 s6, 1, s6 +; MOVREL-NEXT: v_cndmask_b32_e32 v6, s8, v0, vcc_lo +; MOVREL-NEXT: v_readfirstlane_b32 s7, v7 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s6 +; MOVREL-NEXT: v_readfirstlane_b32 s6, v6 +; MOVREL-NEXT: v_cndmask_b32_e32 v8, s14, v0, vcc_lo +; MOVREL-NEXT: v_cndmask_b32_e32 v9, s15, v1, vcc_lo +; MOVREL-NEXT: v_readfirstlane_b32 s8, v8 +; MOVREL-NEXT: v_readfirstlane_b32 s9, v9 ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <5 x double> %vec, double %val, i32 %idx @@ -4302,143 +4926,73 @@ define amdgpu_ps <5 x double> @dyn_insertelement_v5f64_s_v_v(<5 x double> inreg %vec, double %val, i32 %idx) { ; GPRIDX-LABEL: dyn_insertelement_v5f64_s_v_v: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b32 s0, s2 -; GPRIDX-NEXT: s_mov_b32 s1, s3 -; GPRIDX-NEXT: s_mov_b32 s2, s4 -; GPRIDX-NEXT: s_mov_b32 s3, s5 -; GPRIDX-NEXT: s_mov_b32 s4, s6 -; GPRIDX-NEXT: s_mov_b32 s5, s7 -; GPRIDX-NEXT: s_mov_b32 s6, s8 -; GPRIDX-NEXT: s_mov_b32 s7, s9 -; GPRIDX-NEXT: s_mov_b32 s8, s10 -; GPRIDX-NEXT: s_mov_b32 s9, s11 -; GPRIDX-NEXT: v_mov_b32_e32 v34, s15 -; GPRIDX-NEXT: v_mov_b32_e32 v33, s14 -; GPRIDX-NEXT: v_mov_b32_e32 v32, s13 -; GPRIDX-NEXT: v_mov_b32_e32 v31, s12 -; GPRIDX-NEXT: v_mov_b32_e32 v30, s11 -; GPRIDX-NEXT: v_mov_b32_e32 v29, s10 -; GPRIDX-NEXT: v_mov_b32_e32 v28, s9 -; GPRIDX-NEXT: v_mov_b32_e32 v27, s8 -; GPRIDX-NEXT: v_mov_b32_e32 v26, s7 -; GPRIDX-NEXT: v_mov_b32_e32 v25, s6 -; GPRIDX-NEXT: v_mov_b32_e32 v24, s5 -; GPRIDX-NEXT: v_mov_b32_e32 v23, s4 -; GPRIDX-NEXT: v_mov_b32_e32 v22, s3 -; GPRIDX-NEXT: v_mov_b32_e32 v21, s2 -; GPRIDX-NEXT: v_mov_b32_e32 v20, s1 -; GPRIDX-NEXT: v_mov_b32_e32 v19, s0 -; GPRIDX-NEXT: s_mov_b64 s[0:1], exec -; GPRIDX-NEXT: BB56_1: ; =>This Inner Loop Header: Depth=1 -; 
GPRIDX-NEXT: v_readfirstlane_b32 s2, v2 -; GPRIDX-NEXT: s_lshl_b32 s3, s2, 1 -; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, s2, v2 -; GPRIDX-NEXT: s_set_gpr_idx_on s3, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v3, v19 -; GPRIDX-NEXT: v_mov_b32_e32 v4, v20 -; GPRIDX-NEXT: v_mov_b32_e32 v5, v21 -; GPRIDX-NEXT: v_mov_b32_e32 v6, v22 -; GPRIDX-NEXT: v_mov_b32_e32 v7, v23 -; GPRIDX-NEXT: v_mov_b32_e32 v8, v24 -; GPRIDX-NEXT: v_mov_b32_e32 v9, v25 -; GPRIDX-NEXT: v_mov_b32_e32 v10, v26 -; GPRIDX-NEXT: v_mov_b32_e32 v11, v27 -; GPRIDX-NEXT: v_mov_b32_e32 v12, v28 -; GPRIDX-NEXT: v_mov_b32_e32 v13, v29 -; GPRIDX-NEXT: v_mov_b32_e32 v14, v30 -; GPRIDX-NEXT: v_mov_b32_e32 v15, v31 -; GPRIDX-NEXT: v_mov_b32_e32 v16, v32 -; GPRIDX-NEXT: v_mov_b32_e32 v17, v33 -; GPRIDX-NEXT: v_mov_b32_e32 v18, v34 -; GPRIDX-NEXT: v_mov_b32_e32 v3, v0 -; GPRIDX-NEXT: s_set_gpr_idx_off -; GPRIDX-NEXT: s_set_gpr_idx_on s3, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v4, v1 -; GPRIDX-NEXT: s_set_gpr_idx_off -; GPRIDX-NEXT: s_and_saveexec_b64 vcc, vcc -; GPRIDX-NEXT: s_xor_b64 exec, exec, vcc -; GPRIDX-NEXT: s_cbranch_execnz BB56_1 -; GPRIDX-NEXT: ; %bb.2: -; GPRIDX-NEXT: s_mov_b64 exec, s[0:1] +; GPRIDX-NEXT: v_mov_b32_e32 v3, s2 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v4, s3 ; GPRIDX-NEXT: v_readfirstlane_b32 s0, v3 -; GPRIDX-NEXT: v_readfirstlane_b32 s1, v4 -; GPRIDX-NEXT: v_readfirstlane_b32 s2, v5 -; GPRIDX-NEXT: v_readfirstlane_b32 s3, v6 -; GPRIDX-NEXT: v_readfirstlane_b32 s4, v7 -; GPRIDX-NEXT: v_readfirstlane_b32 s5, v8 -; GPRIDX-NEXT: v_readfirstlane_b32 s6, v9 -; GPRIDX-NEXT: v_readfirstlane_b32 s7, v10 -; GPRIDX-NEXT: v_readfirstlane_b32 s8, v11 -; GPRIDX-NEXT: v_readfirstlane_b32 s9, v12 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v4, v1, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v4, s4 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2 +; GPRIDX-NEXT: v_readfirstlane_b32 s1, v3 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v4, v0, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v5, s5 +; GPRIDX-NEXT: v_readfirstlane_b32 s2, v3 +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v5, v1, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v4, s6 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v2 +; GPRIDX-NEXT: v_readfirstlane_b32 s3, v3 +; GPRIDX-NEXT: v_mov_b32_e32 v3, s7 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v0, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v3, v1, vcc +; GPRIDX-NEXT: v_readfirstlane_b32 s4, v4 +; GPRIDX-NEXT: v_mov_b32_e32 v3, s8 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v2 +; GPRIDX-NEXT: v_mov_b32_e32 v4, s9 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v3, v0, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v4, v1, vcc +; GPRIDX-NEXT: v_mov_b32_e32 v3, s10 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v2 +; GPRIDX-NEXT: v_mov_b32_e32 v4, s11 +; GPRIDX-NEXT: v_cndmask_b32_e32 v8, v3, v0, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v9, v4, v1, vcc +; GPRIDX-NEXT: v_readfirstlane_b32 s5, v5 +; GPRIDX-NEXT: v_readfirstlane_b32 s6, v6 +; GPRIDX-NEXT: v_readfirstlane_b32 s7, v7 +; GPRIDX-NEXT: v_readfirstlane_b32 s8, v8 +; GPRIDX-NEXT: v_readfirstlane_b32 s9, v9 ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v5f64_s_v_v: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s0, s2 -; MOVREL-NEXT: s_mov_b32 s1, s3 -; MOVREL-NEXT: s_mov_b32 s2, s4 -; MOVREL-NEXT: s_mov_b32 s3, s5 -; MOVREL-NEXT: s_mov_b32 s4, s6 -; MOVREL-NEXT: s_mov_b32 s5, s7 -; MOVREL-NEXT: s_mov_b32 s6, s8 -; MOVREL-NEXT: s_mov_b32 s7, s9 -; MOVREL-NEXT: s_mov_b32 s8, s10 -; MOVREL-NEXT: s_mov_b32 s9, s11 -; MOVREL-NEXT: 
v_mov_b32_e32 v34, s15 -; MOVREL-NEXT: v_mov_b32_e32 v33, s14 -; MOVREL-NEXT: v_mov_b32_e32 v32, s13 -; MOVREL-NEXT: v_mov_b32_e32 v31, s12 -; MOVREL-NEXT: v_mov_b32_e32 v30, s11 -; MOVREL-NEXT: v_mov_b32_e32 v29, s10 -; MOVREL-NEXT: v_mov_b32_e32 v28, s9 -; MOVREL-NEXT: v_mov_b32_e32 v27, s8 -; MOVREL-NEXT: v_mov_b32_e32 v26, s7 -; MOVREL-NEXT: v_mov_b32_e32 v25, s6 -; MOVREL-NEXT: v_mov_b32_e32 v24, s5 -; MOVREL-NEXT: v_mov_b32_e32 v23, s4 -; MOVREL-NEXT: v_mov_b32_e32 v22, s3 -; MOVREL-NEXT: v_mov_b32_e32 v21, s2 -; MOVREL-NEXT: v_mov_b32_e32 v20, s1 -; MOVREL-NEXT: v_mov_b32_e32 v19, s0 -; MOVREL-NEXT: s_mov_b32 s0, exec_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2 +; MOVREL-NEXT: s_mov_b32 s14, s10 +; MOVREL-NEXT: s_mov_b32 s15, s11 ; MOVREL-NEXT: ; implicit-def: $vcc_hi -; MOVREL-NEXT: BB56_1: ; =>This Inner Loop Header: Depth=1 -; MOVREL-NEXT: v_readfirstlane_b32 s1, v2 -; MOVREL-NEXT: v_mov_b32_e32 v3, v19 -; MOVREL-NEXT: v_mov_b32_e32 v4, v20 -; MOVREL-NEXT: v_mov_b32_e32 v5, v21 -; MOVREL-NEXT: v_mov_b32_e32 v6, v22 -; MOVREL-NEXT: s_lshl_b32 m0, s1, 1 -; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, s1, v2 -; MOVREL-NEXT: v_mov_b32_e32 v7, v23 -; MOVREL-NEXT: v_mov_b32_e32 v8, v24 -; MOVREL-NEXT: v_mov_b32_e32 v9, v25 -; MOVREL-NEXT: v_mov_b32_e32 v10, v26 -; MOVREL-NEXT: v_mov_b32_e32 v11, v27 -; MOVREL-NEXT: v_mov_b32_e32 v12, v28 -; MOVREL-NEXT: v_mov_b32_e32 v13, v29 -; MOVREL-NEXT: v_mov_b32_e32 v14, v30 -; MOVREL-NEXT: v_mov_b32_e32 v15, v31 -; MOVREL-NEXT: v_mov_b32_e32 v16, v32 -; MOVREL-NEXT: v_mov_b32_e32 v17, v33 -; MOVREL-NEXT: v_mov_b32_e32 v18, v34 -; MOVREL-NEXT: v_movreld_b32_e32 v3, v0 -; MOVREL-NEXT: v_movreld_b32_e32 v4, v1 -; MOVREL-NEXT: s_and_saveexec_b32 vcc_lo, vcc_lo -; MOVREL-NEXT: s_xor_b32 exec_lo, exec_lo, vcc_lo -; MOVREL-NEXT: s_cbranch_execnz BB56_1 -; MOVREL-NEXT: ; %bb.2: -; MOVREL-NEXT: s_mov_b32 exec_lo, s0 +; MOVREL-NEXT: v_cndmask_b32_e32 v3, s2, v0, vcc_lo +; MOVREL-NEXT: v_cndmask_b32_e32 v4, s3, v1, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v2 ; MOVREL-NEXT: v_readfirstlane_b32 s0, v3 ; MOVREL-NEXT: v_readfirstlane_b32 s1, v4 -; MOVREL-NEXT: v_readfirstlane_b32 s2, v5 -; MOVREL-NEXT: v_readfirstlane_b32 s3, v6 -; MOVREL-NEXT: v_readfirstlane_b32 s4, v7 -; MOVREL-NEXT: v_readfirstlane_b32 s5, v8 -; MOVREL-NEXT: v_readfirstlane_b32 s6, v9 -; MOVREL-NEXT: v_readfirstlane_b32 s7, v10 -; MOVREL-NEXT: v_readfirstlane_b32 s8, v11 -; MOVREL-NEXT: v_readfirstlane_b32 s9, v12 +; MOVREL-NEXT: v_cndmask_b32_e32 v3, s4, v0, vcc_lo +; MOVREL-NEXT: v_cndmask_b32_e32 v4, s5, v1, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v2 +; MOVREL-NEXT: v_readfirstlane_b32 s2, v3 +; MOVREL-NEXT: v_readfirstlane_b32 s3, v4 +; MOVREL-NEXT: v_cndmask_b32_e32 v5, s6, v0, vcc_lo +; MOVREL-NEXT: v_cndmask_b32_e32 v6, s7, v1, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v2 +; MOVREL-NEXT: v_readfirstlane_b32 s4, v5 +; MOVREL-NEXT: v_readfirstlane_b32 s5, v6 +; MOVREL-NEXT: v_cndmask_b32_e32 v7, s8, v0, vcc_lo +; MOVREL-NEXT: v_cndmask_b32_e32 v8, s9, v1, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v2 +; MOVREL-NEXT: v_readfirstlane_b32 s6, v7 +; MOVREL-NEXT: v_readfirstlane_b32 s7, v8 +; MOVREL-NEXT: v_cndmask_b32_e32 v9, s14, v0, vcc_lo +; MOVREL-NEXT: v_cndmask_b32_e32 v10, s15, v1, vcc_lo +; MOVREL-NEXT: v_readfirstlane_b32 s8, v9 +; MOVREL-NEXT: v_readfirstlane_b32 s9, v10 ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <5 x double> %vec, double %val, i32 %idx @@ -4448,15 +5002,39 @@ define amdgpu_ps <5 x double> 
@dyn_insertelement_v5f64_v_v_s(<5 x double> %vec, double %val, i32 inreg %idx) { ; GPRIDX-LABEL: dyn_insertelement_v5f64_v_v_s: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_lshl_b32 s0, s2, 1 -; GPRIDX-NEXT: v_mov_b32_e32 v16, v11 -; GPRIDX-NEXT: s_set_gpr_idx_on s0, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v0, v10 -; GPRIDX-NEXT: v_mov_b32_e32 v1, v16 -; GPRIDX-NEXT: s_set_gpr_idx_off +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 0 +; GPRIDX-NEXT: s_cselect_b32 s0, 1, 0 +; GPRIDX-NEXT: s_and_b32 s0, 1, s0 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 1 +; GPRIDX-NEXT: s_cselect_b32 s3, 1, 0 +; GPRIDX-NEXT: s_and_b32 s3, 1, s3 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 2 +; GPRIDX-NEXT: s_cselect_b32 s4, 1, 0 +; GPRIDX-NEXT: s_and_b32 s4, 1, s4 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 3 +; GPRIDX-NEXT: s_cselect_b32 s6, 1, 0 +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GPRIDX-NEXT: s_and_b32 s6, 1, s6 +; GPRIDX-NEXT: s_cmp_eq_u32 s2, 4 +; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v10, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s3 +; GPRIDX-NEXT: s_cselect_b32 s2, 1, 0 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v11, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s6 +; GPRIDX-NEXT: s_and_b32 s2, 1, s2 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v11, vcc +; GPRIDX-NEXT: v_cmp_ne_u32_e64 vcc, 0, s2 +; GPRIDX-NEXT: v_readfirstlane_b32 s10, v2 +; GPRIDX-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc ; GPRIDX-NEXT: v_readfirstlane_b32 s0, v0 ; GPRIDX-NEXT: v_readfirstlane_b32 s1, v1 -; GPRIDX-NEXT: v_readfirstlane_b32 s2, v2 ; GPRIDX-NEXT: v_readfirstlane_b32 s3, v3 ; GPRIDX-NEXT: v_readfirstlane_b32 s4, v4 ; GPRIDX-NEXT: v_readfirstlane_b32 s5, v5 @@ -4464,23 +5042,55 @@ ; GPRIDX-NEXT: v_readfirstlane_b32 s7, v7 ; GPRIDX-NEXT: v_readfirstlane_b32 s8, v8 ; GPRIDX-NEXT: v_readfirstlane_b32 s9, v9 +; GPRIDX-NEXT: s_mov_b32 s2, s10 ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v5f64_v_v_s: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_lshl_b32 m0, s2, 1 -; MOVREL-NEXT: v_mov_b32_e32 v16, v11 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 0 +; MOVREL-NEXT: v_mov_b32_e32 v15, v2 +; MOVREL-NEXT: v_mov_b32_e32 v12, v3 +; MOVREL-NEXT: v_mov_b32_e32 v19, v6 +; MOVREL-NEXT: v_mov_b32_e32 v16, v7 +; MOVREL-NEXT: s_cselect_b32 s0, 1, 0 ; MOVREL-NEXT: ; implicit-def: $vcc_hi -; MOVREL-NEXT: v_movreld_b32_e32 v0, v10 -; MOVREL-NEXT: v_movreld_b32_e32 v1, v16 +; MOVREL-NEXT: s_and_b32 s0, 1, s0 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 1 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 +; MOVREL-NEXT: s_cselect_b32 s1, 1, 0 +; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v10, vcc_lo +; MOVREL-NEXT: s_and_b32 s3, 1, s1 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 2 +; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc_lo ; MOVREL-NEXT: v_readfirstlane_b32 s0, v0 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s3 +; MOVREL-NEXT: s_cselect_b32 s4, 1, 0 ; MOVREL-NEXT: v_readfirstlane_b32 s1, v1 -; MOVREL-NEXT: v_readfirstlane_b32 s2, v2 +; MOVREL-NEXT: v_cndmask_b32_e32 v2, v15, v10, vcc_lo +; MOVREL-NEXT: s_and_b32 s3, 1, s4 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 3 +; MOVREL-NEXT: v_cndmask_b32_e32 v3, v12, v11, vcc_lo +; MOVREL-NEXT: v_readfirstlane_b32 s10, v2 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s3 +; MOVREL-NEXT: s_cselect_b32 
s4, 1, 0 ; MOVREL-NEXT: v_readfirstlane_b32 s3, v3 +; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc_lo +; MOVREL-NEXT: s_and_b32 s5, 1, s4 +; MOVREL-NEXT: s_cmp_eq_u32 s2, 4 +; MOVREL-NEXT: v_cndmask_b32_e32 v5, v5, v11, vcc_lo ; MOVREL-NEXT: v_readfirstlane_b32 s4, v4 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s5 +; MOVREL-NEXT: s_cselect_b32 s2, 1, 0 ; MOVREL-NEXT: v_readfirstlane_b32 s5, v5 +; MOVREL-NEXT: v_cndmask_b32_e32 v6, v19, v10, vcc_lo +; MOVREL-NEXT: s_and_b32 s2, 1, s2 +; MOVREL-NEXT: v_cndmask_b32_e32 v7, v16, v11, vcc_lo ; MOVREL-NEXT: v_readfirstlane_b32 s6, v6 +; MOVREL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s2 ; MOVREL-NEXT: v_readfirstlane_b32 s7, v7 +; MOVREL-NEXT: s_mov_b32 s2, s10 +; MOVREL-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc_lo +; MOVREL-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc_lo ; MOVREL-NEXT: v_readfirstlane_b32 s8, v8 ; MOVREL-NEXT: v_readfirstlane_b32 s9, v9 ; MOVREL-NEXT: ; return to shader part epilog @@ -4492,91 +5102,65 @@ define amdgpu_ps <5 x double> @dyn_insertelement_v5f64_v_v_v(<5 x double> %vec, double %val, i32 %idx) { ; GPRIDX-LABEL: dyn_insertelement_v5f64_v_v_v: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b64 s[0:1], exec -; GPRIDX-NEXT: BB58_1: ; =>This Inner Loop Header: Depth=1 -; GPRIDX-NEXT: v_readfirstlane_b32 s2, v12 -; GPRIDX-NEXT: s_lshl_b32 s3, s2, 1 -; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, s2, v12 -; GPRIDX-NEXT: s_set_gpr_idx_on s3, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v28, v15 -; GPRIDX-NEXT: v_mov_b32_e32 v27, v14 -; GPRIDX-NEXT: v_mov_b32_e32 v26, v13 -; GPRIDX-NEXT: v_mov_b32_e32 v25, v12 -; GPRIDX-NEXT: v_mov_b32_e32 v24, v11 -; GPRIDX-NEXT: v_mov_b32_e32 v23, v10 -; GPRIDX-NEXT: v_mov_b32_e32 v22, v9 -; GPRIDX-NEXT: v_mov_b32_e32 v21, v8 -; GPRIDX-NEXT: v_mov_b32_e32 v20, v7 -; GPRIDX-NEXT: v_mov_b32_e32 v19, v6 -; GPRIDX-NEXT: v_mov_b32_e32 v18, v5 -; GPRIDX-NEXT: v_mov_b32_e32 v17, v4 -; GPRIDX-NEXT: v_mov_b32_e32 v16, v3 -; GPRIDX-NEXT: v_mov_b32_e32 v15, v2 -; GPRIDX-NEXT: v_mov_b32_e32 v14, v1 -; GPRIDX-NEXT: v_mov_b32_e32 v13, v0 -; GPRIDX-NEXT: v_mov_b32_e32 v13, v10 -; GPRIDX-NEXT: s_set_gpr_idx_off -; GPRIDX-NEXT: s_set_gpr_idx_on s3, gpr_idx(DST) -; GPRIDX-NEXT: v_mov_b32_e32 v14, v11 -; GPRIDX-NEXT: s_set_gpr_idx_off -; GPRIDX-NEXT: s_and_saveexec_b64 vcc, vcc -; GPRIDX-NEXT: s_xor_b64 exec, exec, vcc -; GPRIDX-NEXT: s_cbranch_execnz BB58_1 -; GPRIDX-NEXT: ; %bb.2: -; GPRIDX-NEXT: s_mov_b64 exec, s[0:1] -; GPRIDX-NEXT: v_readfirstlane_b32 s0, v13 -; GPRIDX-NEXT: v_readfirstlane_b32 s1, v14 -; GPRIDX-NEXT: v_readfirstlane_b32 s2, v15 -; GPRIDX-NEXT: v_readfirstlane_b32 s3, v16 -; GPRIDX-NEXT: v_readfirstlane_b32 s4, v17 -; GPRIDX-NEXT: v_readfirstlane_b32 s5, v18 -; GPRIDX-NEXT: v_readfirstlane_b32 s6, v19 -; GPRIDX-NEXT: v_readfirstlane_b32 s7, v20 -; GPRIDX-NEXT: v_readfirstlane_b32 s8, v21 -; GPRIDX-NEXT: v_readfirstlane_b32 s9, v22 +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v12 +; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v10, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v12 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v12 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v11, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v12 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v11, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v12 +; GPRIDX-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc 
+; GPRIDX-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc +; GPRIDX-NEXT: v_readfirstlane_b32 s0, v0 +; GPRIDX-NEXT: v_readfirstlane_b32 s1, v1 +; GPRIDX-NEXT: v_readfirstlane_b32 s2, v2 +; GPRIDX-NEXT: v_readfirstlane_b32 s3, v3 +; GPRIDX-NEXT: v_readfirstlane_b32 s4, v4 +; GPRIDX-NEXT: v_readfirstlane_b32 s5, v5 +; GPRIDX-NEXT: v_readfirstlane_b32 s6, v6 +; GPRIDX-NEXT: v_readfirstlane_b32 s7, v7 +; GPRIDX-NEXT: v_readfirstlane_b32 s8, v8 +; GPRIDX-NEXT: v_readfirstlane_b32 s9, v9 ; GPRIDX-NEXT: ; return to shader part epilog ; ; MOVREL-LABEL: dyn_insertelement_v5f64_v_v_v: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s0, exec_lo -; MOVREL-NEXT: ; implicit-def: $vcc_hi -; MOVREL-NEXT: BB58_1: ; =>This Inner Loop Header: Depth=1 -; MOVREL-NEXT: v_readfirstlane_b32 s1, v12 -; MOVREL-NEXT: v_mov_b32_e32 v28, v15 -; MOVREL-NEXT: v_mov_b32_e32 v27, v14 -; MOVREL-NEXT: v_mov_b32_e32 v26, v13 -; MOVREL-NEXT: v_mov_b32_e32 v25, v12 -; MOVREL-NEXT: v_mov_b32_e32 v24, v11 -; MOVREL-NEXT: v_mov_b32_e32 v23, v10 -; MOVREL-NEXT: v_mov_b32_e32 v22, v9 -; MOVREL-NEXT: v_mov_b32_e32 v21, v8 -; MOVREL-NEXT: v_mov_b32_e32 v20, v7 -; MOVREL-NEXT: v_mov_b32_e32 v19, v6 -; MOVREL-NEXT: v_mov_b32_e32 v18, v5 -; MOVREL-NEXT: v_mov_b32_e32 v17, v4 -; MOVREL-NEXT: v_mov_b32_e32 v16, v3 +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v12 ; MOVREL-NEXT: v_mov_b32_e32 v15, v2 -; MOVREL-NEXT: v_mov_b32_e32 v14, v1 -; MOVREL-NEXT: v_mov_b32_e32 v13, v0 -; MOVREL-NEXT: s_lshl_b32 m0, s1, 1 -; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, s1, v12 -; MOVREL-NEXT: v_movreld_b32_e32 v13, v10 -; MOVREL-NEXT: v_movreld_b32_e32 v14, v11 -; MOVREL-NEXT: s_and_saveexec_b32 vcc_lo, vcc_lo -; MOVREL-NEXT: s_xor_b32 exec_lo, exec_lo, vcc_lo -; MOVREL-NEXT: s_cbranch_execnz BB58_1 -; MOVREL-NEXT: ; %bb.2: -; MOVREL-NEXT: s_mov_b32 exec_lo, s0 -; MOVREL-NEXT: v_readfirstlane_b32 s0, v13 -; MOVREL-NEXT: v_readfirstlane_b32 s1, v14 -; MOVREL-NEXT: v_readfirstlane_b32 s2, v15 -; MOVREL-NEXT: v_readfirstlane_b32 s3, v16 -; MOVREL-NEXT: v_readfirstlane_b32 s4, v17 -; MOVREL-NEXT: v_readfirstlane_b32 s5, v18 -; MOVREL-NEXT: v_readfirstlane_b32 s6, v19 -; MOVREL-NEXT: v_readfirstlane_b32 s7, v20 -; MOVREL-NEXT: v_readfirstlane_b32 s8, v21 -; MOVREL-NEXT: v_readfirstlane_b32 s9, v22 +; MOVREL-NEXT: v_mov_b32_e32 v16, v3 +; MOVREL-NEXT: v_mov_b32_e32 v19, v6 +; MOVREL-NEXT: v_mov_b32_e32 v20, v7 +; MOVREL-NEXT: v_cndmask_b32_e32 v0, v0, v10, vcc_lo +; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v12 +; MOVREL-NEXT: ; implicit-def: $vcc_hi +; MOVREL-NEXT: v_readfirstlane_b32 s0, v0 +; MOVREL-NEXT: v_readfirstlane_b32 s1, v1 +; MOVREL-NEXT: v_cndmask_b32_e32 v2, v15, v10, vcc_lo +; MOVREL-NEXT: v_cndmask_b32_e32 v3, v16, v11, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v12 +; MOVREL-NEXT: v_readfirstlane_b32 s2, v2 +; MOVREL-NEXT: v_readfirstlane_b32 s3, v3 +; MOVREL-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc_lo +; MOVREL-NEXT: v_cndmask_b32_e32 v5, v5, v11, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v12 +; MOVREL-NEXT: v_readfirstlane_b32 s4, v4 +; MOVREL-NEXT: v_readfirstlane_b32 s5, v5 +; MOVREL-NEXT: v_cndmask_b32_e32 v6, v19, v10, vcc_lo +; MOVREL-NEXT: v_cndmask_b32_e32 v7, v20, v11, vcc_lo +; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v12 +; MOVREL-NEXT: v_readfirstlane_b32 s6, v6 +; MOVREL-NEXT: v_readfirstlane_b32 s7, v7 +; MOVREL-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc_lo +; MOVREL-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc_lo +; MOVREL-NEXT: v_readfirstlane_b32 s8, v8 +; 
MOVREL-NEXT: v_readfirstlane_b32 s9, v9 ; MOVREL-NEXT: ; return to shader part epilog entry: %insert = insertelement <5 x double> %vec, double %val, i32 %idx