diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -8299,6 +8299,16 @@
   SDLoc DL(Op);
   SDValue Cond = Op.getOperand(0);
 
+  if (Subtarget->hasScalarCompareEq64() && Op->getOperand(0)->hasOneUse() &&
+      !Op->isDivergent()) {
+    if (VT == MVT::i64)
+      return Op;
+    SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(1));
+    SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(2));
+    return DAG.getNode(ISD::BITCAST, DL, VT,
+                       DAG.getSelect(DL, MVT::i64, Cond, LHS, RHS));
+  }
+
   SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
   SDValue One = DAG.getConstant(1, DL, MVT::i32);
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -78,8 +78,11 @@
   moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst,
                    MachineDominatorTree *MDT = nullptr) const;
 
-  void lowerSelect(SetVectorType &Worklist, MachineInstr &Inst,
-                   MachineDominatorTree *MDT = nullptr) const;
+  void lowerSelect32(SetVectorType &Worklist, MachineInstr &Inst,
+                     MachineDominatorTree *MDT = nullptr) const;
+
+  void splitSelect64(SetVectorType &Worklist, MachineInstr &Inst,
+                     MachineDominatorTree *MDT = nullptr) const;
 
   void lowerScalarAbs(SetVectorType &Worklist, MachineInstr &Inst) const;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -6026,8 +6026,11 @@
       continue;
 
     case AMDGPU::S_CSELECT_B32:
+      lowerSelect32(Worklist, Inst, MDT);
+      Inst.eraseFromParent();
+      continue;
     case AMDGPU::S_CSELECT_B64:
-      lowerSelect(Worklist, Inst, MDT);
+      splitSelect64(Worklist, Inst, MDT);
       Inst.eraseFromParent();
       continue;
    case AMDGPU::S_CMP_EQ_I32:
@@ -6205,8 +6208,8 @@
   return std::make_pair(false, nullptr);
 }
 
-void SIInstrInfo::lowerSelect(SetVectorType &Worklist, MachineInstr &Inst,
-                              MachineDominatorTree *MDT) const {
+void SIInstrInfo::lowerSelect32(SetVectorType &Worklist, MachineInstr &Inst,
+                                MachineDominatorTree *MDT) const {
   MachineBasicBlock &MBB = *Inst.getParent();
   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
@@ -6281,6 +6284,93 @@
   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
 }
 
+void SIInstrInfo::splitSelect64(SetVectorType &Worklist, MachineInstr &Inst,
+                                MachineDominatorTree *MDT) const {
+  // Split S_CSELECT_B64 into a pair of S_CSELECT_B32 and lower them
+  // further.
+  const DebugLoc &DL = Inst.getDebugLoc();
+  MachineBasicBlock::iterator MII = Inst;
+  MachineBasicBlock &MBB = *Inst.getParent();
+  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+
+  // Prepare the split destination.
+  Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
+  Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+  Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+
+  // Get the original operands.
+  MachineOperand &Dest = Inst.getOperand(0);
+  MachineOperand &Src0 = Inst.getOperand(1);
+  MachineOperand &Src1 = Inst.getOperand(2);
+  MachineOperand &Cond = Inst.getOperand(3);
+
+  Register SCCSource = Cond.getReg();
+  bool IsSCC = (SCCSource == AMDGPU::SCC);
+
+  // If this is a trivial select where the condition is effectively not SCC
+  // (SCCSource is the register that was copied into SCC), the select is
+  // semantically equivalent to copying SCCSource. Hence there is no need to
+  // create a V_CNDMASK; we can just use SCCSource directly and bail out.
+  if (!IsSCC && (Src0.isImm() && Src0.getImm() == -1) &&
+      (Src1.isImm() && Src1.getImm() == 0)) {
+    MRI.replaceRegWith(Dest.getReg(), SCCSource);
+    return;
+  }
+
+  // Split the source operands.
+  const TargetRegisterClass *Src0RC = nullptr;
+  const TargetRegisterClass *Src0SubRC = nullptr;
+  if (Src0.isReg()) {
+    Src0RC = MRI.getRegClass(Src0.getReg());
+    Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
+  }
+  const TargetRegisterClass *Src1RC = nullptr;
+  const TargetRegisterClass *Src1SubRC = nullptr;
+  if (Src1.isReg()) {
+    Src1RC = MRI.getRegClass(Src1.getReg());
+    Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
+  }
+
+  // Split lo.
+  MachineOperand SrcReg0Sub0 =
+      buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
+  MachineOperand SrcReg1Sub0 =
+      buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, AMDGPU::sub0, Src1SubRC);
+  // Split hi.
+  MachineOperand SrcReg0Sub1 =
+      buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC);
+  MachineOperand SrcReg1Sub1 =
+      buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, AMDGPU::sub1, Src1SubRC);
+
+  // Select the lo part.
+  MachineInstr *LoHalf =
+      BuildMI(MBB, MII, DL, get(AMDGPU::S_CSELECT_B32), DestSub0)
+          .add(SrcReg0Sub0)
+          .add(SrcReg1Sub0);
+  // Replace the condition operand with the original one.
+  LoHalf->getOperand(3).setReg(SCCSource);
+  Worklist.insert(LoHalf);
+
+  // Select the hi part.
+  MachineInstr *HiHalf =
+      BuildMI(MBB, MII, DL, get(AMDGPU::S_CSELECT_B32), DestSub1)
+          .add(SrcReg0Sub1)
+          .add(SrcReg1Sub1);
+  // Replace the condition operand with the original one.
+  HiHalf->getOperand(3).setReg(SCCSource);
+  Worklist.insert(HiHalf);
+
+  // Merge them back to the original 64-bit one.
+  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
+      .addReg(DestSub0)
+      .addImm(AMDGPU::sub0)
+      .addReg(DestSub1)
+      .addImm(AMDGPU::sub1);
+
+  MRI.replaceRegWith(Dest.getReg(), FullDestReg);
+
+  // Try to legalize the operands in case we need to swap the order to keep
+  // it valid.
+  legalizeOperands(*LoHalf, MDT);
+  legalizeOperands(*HiHalf, MDT);
+
+  // Move all users of this moved value.
+  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
+}
+
 void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist,
                                  MachineInstr &Inst) const {
   MachineBasicBlock &MBB = *Inst.getParent();
diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -517,9 +517,10 @@
   def S_CSELECT_B32 : SOP2_32 <"s_cselect_b32",
     [(set i32:$sdst, (SelectPat i32:$src0, i32:$src1))]
   >;
 
-  def S_CSELECT_B64 : SOP2_64 <"s_cselect_b64">;
+  def S_CSELECT_B64 : SOP2_64 <"s_cselect_b64",
+    [(set i64:$sdst, (SelectPat i64:$src0, i64:$src1))]>;
 } // End Uses = [SCC]
 
 let Defs = [SCC] in {
diff --git a/llvm/test/CodeGen/AMDGPU/addrspacecast.ll b/llvm/test/CodeGen/AMDGPU/addrspacecast.ll
--- a/llvm/test/CodeGen/AMDGPU/addrspacecast.ll
+++ b/llvm/test/CodeGen/AMDGPU/addrspacecast.ll
@@ -17,17 +17,15 @@
 ; CI-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, [[VPTR]]
 
 ; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
-; GFX9-DAG: s_load_dword [[PTR:s[0-9]+]], s[4:5], 0x0{{$}}
+; GFX9-DAG: s_load_dword s[[PTR:[0-9]+]], s[4:5], 0x0{{$}}
 ; GFX9-DAG: s_getreg_b32 [[SSRC_SHARED:s[0-9]+]], hwreg(HW_REG_SH_MEM_BASES, 16, 16)
-; GFX9-DAG: s_lshl_b32 [[SSRC_SHARED_BASE:s[0-9]+]], [[SSRC_SHARED]], 16
-; GFX9-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[SSRC_SHARED_BASE]]
+; GFX9-DAG: s_lshl_b32 s[[SSRC_SHARED_BASE:[0-9]+]], [[SSRC_SHARED]], 16
 
 ; GFX9-XXX: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], src_shared_base
 
-; GFX9: s_cmp_lg_u32 [[PTR]], -1
-; GFX9: s_cselect_b64 vcc, -1, 0
-; GFX9: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, [[VAPERTURE]], vcc
-; GFX9-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
-; GFX9-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, [[VPTR]]
+; GFX9: s_cmp_lg_u32 s[[PTR]], -1
+; GFX9: s_cselect_b64 s{{\[}}[[SEL_LO:[0-9]+]]:[[SEL_HI:[0-9]+]]{{\]}}, s{{\[}}[[PTR]]:[[SSRC_SHARED_BASE]]{{\]}}, 0
+; GFX9-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], s[[SEL_LO]]
+; GFX9-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], s[[SEL_HI]]
 
 ; HSA: flat_store_dword v{{\[}}[[LO]]:[[HI]]{{\]}}, [[K]]
@@ -84,19 +82,17 @@
 ; CI-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
 ; CI-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, [[VPTR]]
 
-; GFX9-DAG: s_load_dword [[PTR:s[0-9]+]], s[4:5], 0x0{{$}}
+; GFX9-DAG: s_load_dword s[[PTR:[0-9]+]], s[4:5], 0x0{{$}}
 ; GFX9-DAG: s_getreg_b32 [[SSRC_PRIVATE:s[0-9]+]], hwreg(HW_REG_SH_MEM_BASES, 0, 16)
-; GFX9-DAG: s_lshl_b32 [[SSRC_PRIVATE_BASE:s[0-9]+]], [[SSRC_PRIVATE]], 16
-; GFX9-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[SSRC_PRIVATE_BASE]]
+; GFX9-DAG: s_lshl_b32 s[[SSRC_PRIVATE_BASE:[0-9]+]], [[SSRC_PRIVATE]], 16
 
 ; GFX9-XXX: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], src_private_base
 
 ; GFX9-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
 
-; GFX9: s_cmp_lg_u32 [[PTR]], -1
-; GFX9: s_cselect_b64 vcc, -1, 0
-; GFX9: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, [[VAPERTURE]], vcc
-; GFX9: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
-; GFX9-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, [[VPTR]]
+; GFX9: s_cmp_lg_u32 s[[PTR]], -1
+; GFX9: s_cselect_b64 s{{\[}}[[SEL_LO:[0-9]+]]:[[SEL_HI:[0-9]+]]{{\]}}, s{{\[}}[[PTR]]:[[SSRC_PRIVATE_BASE]]{{\]}}, 0
+; GFX9-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], s[[SEL_LO]]
+; GFX9-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], s[[SEL_HI]]
 
 ; HSA: flat_store_dword v{{\[}}[[LO]]:[[HI]]{{\]}}, [[K]]
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -7316,24 +7316,21 @@
 ; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[0:1]
 ; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s2, v4
 ; GFX9-NEXT: v_cndmask_b32_e64 v4, v7, v6, s[0:1]
-; GFX9-NEXT: v_add_co_u32_e64 v6, s[0:1], 2, v0
-; GFX9-NEXT: v_addc_co_u32_e64 v7, s[0:1], 0, v1, s[0:1]
-; GFX9-NEXT: v_add_co_u32_e64 v8, s[0:1], 1, v0
-; GFX9-NEXT: v_addc_co_u32_e64 v9, s[0:1], 0, v1, s[0:1]
-; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4
-; GFX9-NEXT: v_cndmask_b32_e64 v4, v9, v7, s[0:1]
 ; GFX9-NEXT: v_mov_b32_e32 v7, s7
 ; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v7, v2, vcc
 ; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s3, v2
+; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4
 ; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, -1, vcc
 ; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s6, v3
+; GFX9-NEXT: v_cndmask_b32_e64 v4, 1, 2, s[0:1]
 ; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, -1, vcc
 ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, s2, v2
+; GFX9-NEXT: v_add_co_u32_e64 v4, s[0:1], v0, v4
 ; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v3, vcc
 ; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; GFX9-NEXT: v_cndmask_b32_e64 v2, v8, v6, s[0:1]
-; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_addc_co_u32_e64 v6, s[0:1], 0, v1, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc
 ; GFX9-NEXT: global_store_dwordx2 v5, v[0:1], s[4:5]
 ; GFX9-NEXT: s_endpgm
 %r = udiv i64 %x, 1235195949943
@@ -7600,6 +7597,7 @@
 ; GFX9-NEXT: v_mac_f32_e32 v0, 0xcf800000, v1
 ; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
 ; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1
+; GFX9-NEXT: s_movk_i32 s8, 0xfff
 ; GFX9-NEXT: v_mul_hi_u32 v2, v0, s4
 ; GFX9-NEXT: v_mul_lo_u32 v4, v1, s4
 ; GFX9-NEXT: v_mul_lo_u32 v3, v0, s4
@@ -7624,7 +7622,6 @@
 ; GFX9-NEXT: v_mul_hi_u32 v4, v0, s4
 ; GFX9-NEXT: v_mul_lo_u32 v6, v2, s4
 ; GFX9-NEXT: v_mul_lo_u32 v8, v0, s4
-; GFX9-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x24
 ; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x34
 ; GFX9-NEXT: v_sub_u32_e32 v4, v4, v0
 ; GFX9-NEXT: v_add_u32_e32 v4, v4, v6
@@ -7638,7 +7635,7 @@
 ; GFX9-NEXT: v_mul_lo_u32 v10, v2, v8
 ; GFX9-NEXT: v_mul_hi_u32 v8, v2, v8
 ; GFX9-NEXT: v_mul_lo_u32 v2, v2, v4
-; GFX9-NEXT: s_movk_i32 s0, 0xfff
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
 ; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v6, v10
 ; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, v9, v8, vcc
 ; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v11, v5, vcc
@@ -7658,42 +7655,39 @@
 ; GFX9-NEXT: v_mul_lo_u32 v4, s7, v0
 ; GFX9-NEXT: v_mul_hi_u32 v0, s7, v0
 ; GFX9-NEXT: s_lshr_b64 s[2:3], s[4:5], 12
+; GFX9-NEXT: s_movk_i32 s4, 0xffe
 ; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v4
 ; GFX9-NEXT: v_addc_co_u32_e32 v0, vcc, v3, v0, vcc
 ; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, v6, v5, vcc
 ; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v1
 ; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v7, v2, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, 2, v0
-; GFX9-NEXT: v_mul_lo_u32 v4, v1, s0
-; GFX9-NEXT: v_mul_hi_u32 v6, v0, s0
-; GFX9-NEXT: v_mul_lo_u32 v9, v0, s0
-; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, 1, v0
-; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v1, vcc
-; GFX9-NEXT: v_add_u32_e32 v4, v6, v4
-; GFX9-NEXT: v_mov_b32_e32 v6, s7
-; GFX9-NEXT: v_sub_co_u32_e32 v9, vcc, s6, v9
-; GFX9-NEXT: v_subb_co_u32_e32 v4, vcc, v6, v4, vcc
-; GFX9-NEXT: v_subrev_co_u32_e32 v6, vcc, s0, v9
-; GFX9-NEXT: v_subbrev_co_u32_e32 v10, vcc, 0, v4, vcc
-; GFX9-NEXT: s_movk_i32 s0, 0xffe
-; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s0, v6
-; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v10
-; GFX9-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc
-; GFX9-NEXT: v_cmp_lt_u32_e64 s[0:1], s0, v9
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
-; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[0:1]
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v4
-; GFX9-NEXT: v_cndmask_b32_e64 v4, -1, v6, s[0:1]
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v8, v3, vcc
-; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4
-; GFX9-NEXT: v_cndmask_b32_e64 v3, v1, v3, s[0:1]
-; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v2, v0, v1, s[0:1]
+; GFX9-NEXT: v_mul_lo_u32 v4, v0, s8
+; GFX9-NEXT: v_mul_lo_u32 v2, v1, s8
+; GFX9-NEXT: v_mul_hi_u32 v3, v0, s8
+; GFX9-NEXT: v_sub_co_u32_e32 v4, vcc, s6, v4
+; GFX9-NEXT: v_add_u32_e32 v2, v3, v2
+; GFX9-NEXT: v_mov_b32_e32 v3, s7
+; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v3, v2, vcc
+; GFX9-NEXT: v_subrev_co_u32_e32 v3, vcc, s8, v4
+; GFX9-NEXT: v_subbrev_co_u32_e32 v6, vcc, 0, v2, vcc
+; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s4, v3
+; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, -1, vcc
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v3, -1, v3, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e64 v3, 1, 2, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v0, v3
+; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v1, vcc
+; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s4, v4
+; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v2, -1, v4, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v6, vcc
 ; GFX9-NEXT: v_mov_b32_e32 v0, s2
 ; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: global_store_dwordx4 v5, v[0:3], s[8:9]
+; GFX9-NEXT: global_store_dwordx4 v5, v[0:3], s[0:1]
 ; GFX9-NEXT: s_endpgm
 %r = udiv <2 x i64> %x, <i64 4096, i64 4095>
 store <2 x i64> %r, <2 x i64> addrspace(1)* %out
@@ -7990,19 +7984,19 @@
 ; GFX9-NEXT: v_cndmask_b32_e64 v7, v7, v8, s[2:3]
 ; GFX9-NEXT: v_subbrev_co_u32_e64 v2, s[0:1], 0, v2, s[0:1]
 ; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v7
-; GFX9-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v6, s7
-; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v6, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v4, v3, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, s7
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v4, v1, vcc
 ; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s6, v1
-; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc
 ; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s10, v0
-; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc
 ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, s8, v1
-; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
 ; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v3, v5, v4, s[0:1]
-; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
 ; GFX9-NEXT: global_store_dwordx2 v5, v[0:1], s[4:5]
 ; GFX9-NEXT: s_endpgm
 %r = urem i64 %x, 1235195393993
@@ -8348,6 +8342,9 @@
 ; GFX9-NEXT: v_mul_hi_u32 v3, v0, s8
 ; GFX9-NEXT: v_mul_lo_u32 v2, v1, s8
 ; GFX9-NEXT: v_mul_lo_u32 v4, v0, s8
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_ashr_i32 s0, s7, 31
+; GFX9-NEXT: s_mov_b32 s1, s0
 ; GFX9-NEXT: v_add_u32_e32 v2, v3, v2
 ; GFX9-NEXT: v_sub_u32_e32 v2, v2, v0
 ; GFX9-NEXT: v_mul_hi_u32 v3, v0, v4
@@ -8387,61 +8384,55 @@
 ; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v8, v2
 ; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v7, v4, vcc
 ; GFX9-NEXT: v_addc_co_u32_e64 v1, vcc, v1, v4, s[2:3]
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_ashr_i32 s2, s7, 31
-; GFX9-NEXT: s_add_u32 s0, s6, s2
+; GFX9-NEXT: s_add_u32 s2, s6, s0
+; GFX9-NEXT: s_addc_u32 s3, s7, s0
 ; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
-; GFX9-NEXT: s_mov_b32 s3, s2
-; GFX9-NEXT: s_addc_u32 s1, s7, s2
-; GFX9-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GFX9-NEXT: s_xor_b64 s[2:3], s[2:3], s[0:1]
 ; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: v_mul_lo_u32 v2, s0, v1
-; GFX9-NEXT: v_mul_hi_u32 v3, s0, v0
-; GFX9-NEXT: v_mul_hi_u32 v4, s0, v1
-; GFX9-NEXT: v_mul_hi_u32 v6, s1, v1
-; GFX9-NEXT: v_mul_lo_u32 v1, s1, v1
+; GFX9-NEXT: v_mul_lo_u32 v2, s2, v1
+; GFX9-NEXT: v_mul_hi_u32 v3, s2, v0
+; GFX9-NEXT: v_mul_hi_u32 v4, s2, v1
+; GFX9-NEXT: v_mul_hi_u32 v6, s3, v1
+; GFX9-NEXT: v_mul_lo_u32 v1, s3, v1
 ; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v3, v2
 ; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v7, v4, vcc
-; GFX9-NEXT: v_mul_lo_u32 v4, s1, v0
-; GFX9-NEXT: v_mul_hi_u32 v0, s1, v0
-; GFX9-NEXT: s_mov_b32 s3, 0x12d8fb
+; GFX9-NEXT: v_mul_lo_u32 v4, s3, v0
+; GFX9-NEXT: v_mul_hi_u32 v0, s3, v0
+; GFX9-NEXT: s_mov_b32 s1, 0x12d8fb
 ; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v4
 ; GFX9-NEXT: v_addc_co_u32_e32 v0, vcc, v3, v0, vcc
 ; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, v6, v5, vcc
 ; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v1
 ; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v7, v2, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, 2, v0
-; GFX9-NEXT: v_mul_lo_u32 v4, v1, s3
-; GFX9-NEXT: v_mul_hi_u32 v6, v0, s3
-; GFX9-NEXT: v_mul_lo_u32 v9, v0, s3
-; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, 1, v0
-; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v1, vcc
-; GFX9-NEXT: v_add_u32_e32 v4, v6, v4
-; GFX9-NEXT: v_sub_co_u32_e32 v9, vcc, s0, v9
-; GFX9-NEXT: v_mov_b32_e32 v6, s1
-; GFX9-NEXT: v_subb_co_u32_e32 v4, vcc, v6, v4, vcc
-; GFX9-NEXT: v_subrev_co_u32_e32 v6, vcc, s3, v9
-; GFX9-NEXT: v_subbrev_co_u32_e32 v10, vcc, 0, v4, vcc
-; GFX9-NEXT: s_mov_b32 s0, 0x12d8fa
-; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s0, v6
-; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v10
-; GFX9-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc
-; GFX9-NEXT: v_cmp_lt_u32_e64 s[0:1], s0, v9
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
-; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[0:1]
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v4
-; GFX9-NEXT: v_cndmask_b32_e64 v4, -1, v6, s[0:1]
-; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4
-; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v8, v3, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v3, s[0:1]
-; GFX9-NEXT: v_xor_b32_e32 v0, s2, v0
-; GFX9-NEXT: v_xor_b32_e32 v1, s2, v1
-; GFX9-NEXT: v_mov_b32_e32 v2, s2
-; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, s2, v0
+; GFX9-NEXT: v_mul_lo_u32 v4, v0, s1
+; GFX9-NEXT: v_mul_lo_u32 v2, v1, s1
+; GFX9-NEXT: v_mul_hi_u32 v3, v0, s1
+; GFX9-NEXT: v_sub_co_u32_e32 v4, vcc, s2, v4
+; GFX9-NEXT: v_add_u32_e32 v2, v3, v2
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v3, v2, vcc
+; GFX9-NEXT: v_subrev_co_u32_e32 v3, vcc, s1, v4
+; GFX9-NEXT: v_subbrev_co_u32_e32 v6, vcc, 0, v2, vcc
+; GFX9-NEXT: s_mov_b32 s1, 0x12d8fa
+; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s1, v3
+; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, -1, vcc
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v3, -1, v3, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e64 v3, 1, 2, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v0, v3
+; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v1, vcc
+; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s1, v4
+; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v2, -1, v4, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc
+; GFX9-NEXT: v_xor_b32_e32 v0, s0, v0
+; GFX9-NEXT: v_xor_b32_e32 v1, s0, v1
+; GFX9-NEXT: v_mov_b32_e32 v2, s0
+; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, s0, v0
 ; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
 ; GFX9-NEXT: global_store_dwordx2 v5, v[0:1], s[4:5]
 ; GFX9-NEXT: s_endpgm
@@ -8750,25 +8741,22 @@
 ; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[0:1]
 ; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v5
 ; GFX9-NEXT: v_cndmask_b32_e64 v5, v7, v6, s[0:1]
-; GFX9-NEXT: v_add_co_u32_e64 v6, s[0:1], 2, v0
-; GFX9-NEXT: v_addc_co_u32_e64 v7, s[0:1], 0, v1, s[0:1]
-; GFX9-NEXT: v_add_co_u32_e64 v8, s[0:1], 1, v0
-; GFX9-NEXT: v_addc_co_u32_e64 v9, s[0:1], 0, v1, s[0:1]
-; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v5
-; GFX9-NEXT: v_cndmask_b32_e64 v5, v9, v7, s[0:1]
 ; GFX9-NEXT: v_mov_b32_e32 v7, s7
 ; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v7, v3, vcc
 ; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s11, v3
+; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v5
 ; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, -1, vcc
 ; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s10, v4
+; GFX9-NEXT: v_cndmask_b32_e64 v5, 1, 2, s[0:1]
 ; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc
 ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, s11, v3
+; GFX9-NEXT: v_add_co_u32_e64 v5, s[0:1], v0, v5
 ; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v4, vcc
+; GFX9-NEXT: v_addc_co_u32_e64 v6, s[0:1], 0, v1, s[0:1]
 ; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
-; GFX9-NEXT: v_cndmask_b32_e64 v3, v8, v6, s[0:1]
-; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
 ; GFX9-NEXT: s_xor_b64 s[0:1], s[2:3], s[8:9]
-; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc
 ; GFX9-NEXT: v_xor_b32_e32 v0, s0, v0
 ; GFX9-NEXT: v_xor_b32_e32 v1, s1, v1
 ; GFX9-NEXT: v_mov_b32_e32 v3, s1
@@ -9017,6 +9005,7 @@
 ; GFX9-NEXT: v_mul_lo_u32 v7, v1, v5
 ; GFX9-NEXT: v_mul_hi_u32 v5, v1, v5
 ; GFX9-NEXT: s_ashr_i64 s[4:5], s[4:5], 12
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
 ; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v3, v7
 ; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v6, v5, vcc
 ; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v8, v4, vcc
@@ -9029,7 +9018,6 @@
 ; GFX9-NEXT: v_mul_hi_u32 v7, v0, s8
 ; GFX9-NEXT: v_mul_lo_u32 v8, v0, s8
 ; GFX9-NEXT: v_add_u32_e32 v1, v1, v3
-; GFX9-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x24
 ; GFX9-NEXT: v_add_u32_e32 v5, v7, v5
 ; GFX9-NEXT: v_sub_u32_e32 v5, v5, v0
 ; GFX9-NEXT: v_mul_lo_u32 v10, v0, v5
@@ -9063,40 +9051,37 @@
 ; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v5, vcc
 ; GFX9-NEXT: v_mul_lo_u32 v5, s7, v0
 ; GFX9-NEXT: v_mul_hi_u32 v0, s7, v0
-; GFX9-NEXT: s_movk_i32 s0, 0xfff
+; GFX9-NEXT: s_movk_i32 s3, 0xfff
 ; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v5
 ; GFX9-NEXT: v_addc_co_u32_e32 v0, vcc, v3, v0, vcc
 ; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, v7, v4, vcc
 ; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v1
 ; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v6, v2, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, 2, v0
-; GFX9-NEXT: v_mul_lo_u32 v5, v1, s0
-; GFX9-NEXT: v_mul_hi_u32 v6, v0, s0
-; GFX9-NEXT: v_mul_lo_u32 v9, v0, s0
-; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, 1, v0
-; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v1, vcc
-; GFX9-NEXT: v_add_u32_e32 v5, v6, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, s7
-; GFX9-NEXT: v_sub_co_u32_e32 v9, vcc, s6, v9
-; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v6, v5, vcc
-; GFX9-NEXT: v_subrev_co_u32_e32 v6, vcc, s0, v9
-; GFX9-NEXT: v_subbrev_co_u32_e32 v10, vcc, 0, v5, vcc
-; GFX9-NEXT: s_movk_i32 s0, 0xffe
-; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s0, v6
-; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v10
-; GFX9-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc
-; GFX9-NEXT: v_cmp_lt_u32_e64 s[0:1], s0, v9
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
-; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[0:1]
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v5
-; GFX9-NEXT: v_cndmask_b32_e64 v5, -1, v6, s[0:1]
-; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v5
-; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v8, v3, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v3, s[0:1]
+; GFX9-NEXT: v_mul_lo_u32 v5, v0, s3
+; GFX9-NEXT: v_mul_lo_u32 v2, v1, s3
+; GFX9-NEXT: v_mul_hi_u32 v3, v0, s3
+; GFX9-NEXT: v_sub_co_u32_e32 v5, vcc, s6, v5
+; GFX9-NEXT: v_add_u32_e32 v2, v3, v2
+; GFX9-NEXT: v_mov_b32_e32 v3, s7
+; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v3, v2, vcc
+; GFX9-NEXT: v_subrev_co_u32_e32 v3, vcc, s3, v5
+; GFX9-NEXT: v_subbrev_co_u32_e32 v6, vcc, 0, v2, vcc
+; GFX9-NEXT: s_movk_i32 s3, 0xffe
+; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s3, v3
+; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, -1, vcc
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v3, -1, v3, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e64 v3, 1, 2, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v0, v3
+; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v1, vcc
+; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s3, v5
+; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v2, -1, v5, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc
 ; GFX9-NEXT: v_xor_b32_e32 v0, s2, v0
 ; GFX9-NEXT: v_subrev_co_u32_e32 v2, vcc, s2, v0
 ; GFX9-NEXT: v_xor_b32_e32 v1, s2, v1
@@ -9105,7 +9090,7 @@
 ; GFX9-NEXT: v_mov_b32_e32 v0, s4
 ; GFX9-NEXT: v_mov_b32_e32 v1, s5
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: global_store_dwordx4 v4, v[0:3], s[8:9]
+; GFX9-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
 ; GFX9-NEXT: s_endpgm
 %r = sdiv <2 x i64> %x, <i64 4096, i64 4095>
 store <2 x i64> %r, <2 x i64> addrspace(1)* %out
@@ -9511,67 +9496,64 @@
 ; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1]
 ; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v4
 ; GFX9-NEXT: v_cndmask_b32_e64 v4, v8, v7, s[0:1]
-; GFX9-NEXT: v_add_co_u32_e64 v7, s[0:1], 2, v0
-; GFX9-NEXT: v_addc_co_u32_e64 v8, s[0:1], 0, v1, s[0:1]
-; GFX9-NEXT: v_add_co_u32_e64 v9, s[0:1], 1, v0
-; GFX9-NEXT: v_addc_co_u32_e64 v10, s[0:1], 0, v1, s[0:1]
 ; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4
-; GFX9-NEXT: v_cndmask_b32_e64 v4, v10, v8, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v4, 1, 2, s[0:1]
+; GFX9-NEXT: v_add_co_u32_e64 v4, s[0:1], v0, v4
 ; GFX9-NEXT: v_mov_b32_e32 v8, s5
 ; GFX9-NEXT: s_xor_b64 s[4:5], s[14:15], s[12:13]
 ; GFX9-NEXT: s_ashr_i32 s12, s9, 31
-; GFX9-NEXT: s_add_u32 s8, s8, s12
+; GFX9-NEXT: v_addc_co_u32_e64 v7, s[0:1], 0, v1, s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s8, s12
 ; GFX9-NEXT: s_mov_b32 s13, s12
-; GFX9-NEXT: s_addc_u32 s9, s9, s12
-; GFX9-NEXT: s_xor_b64 s[8:9], s[8:9], s[12:13] -; GFX9-NEXT: v_cvt_f32_u32_e32 v10, s8 -; GFX9-NEXT: v_cvt_f32_u32_e32 v11, s9 +; GFX9-NEXT: s_addc_u32 s1, s9, s12 +; GFX9-NEXT: s_xor_b64 s[8:9], s[0:1], s[12:13] ; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v8, v2, vcc +; GFX9-NEXT: v_cvt_f32_u32_e32 v8, s8 +; GFX9-NEXT: v_cvt_f32_u32_e32 v9, s9 ; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s11, v2 -; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, -1, vcc ; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s10, v3 +; GFX9-NEXT: v_mac_f32_e32 v8, s16, v9 +; GFX9-NEXT: v_rcp_f32_e32 v8, v8 ; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, -1, vcc ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, s11, v2 -; GFX9-NEXT: v_mac_f32_e32 v10, s16, v11 -; GFX9-NEXT: v_cndmask_b32_e32 v2, v8, v3, vcc -; GFX9-NEXT: v_rcp_f32_e32 v3, v10 +; GFX9-NEXT: v_cndmask_b32_e32 v2, v10, v3, vcc ; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc -; GFX9-NEXT: s_sub_u32 s10, 0, s8 -; GFX9-NEXT: v_mul_f32_e32 v3, s17, v3 -; GFX9-NEXT: v_mul_f32_e32 v4, s18, v3 -; GFX9-NEXT: v_trunc_f32_e32 v4, v4 -; GFX9-NEXT: v_mac_f32_e32 v3, s19, v4 -; GFX9-NEXT: v_cvt_u32_f32_e32 v4, v4 +; GFX9-NEXT: v_mul_f32_e32 v2, s17, v8 +; GFX9-NEXT: v_mul_f32_e32 v3, s18, v2 +; GFX9-NEXT: v_trunc_f32_e32 v3, v3 +; GFX9-NEXT: v_mac_f32_e32 v2, s19, v3 +; GFX9-NEXT: v_cvt_u32_f32_e32 v2, v2 ; GFX9-NEXT: v_cvt_u32_f32_e32 v3, v3 -; GFX9-NEXT: v_cndmask_b32_e64 v2, v9, v7, s[0:1] +; GFX9-NEXT: s_sub_u32 s10, 0, s8 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc ; GFX9-NEXT: s_subb_u32 s11, 0, s9 -; GFX9-NEXT: v_mul_lo_u32 v8, s10, v4 -; GFX9-NEXT: v_mul_hi_u32 v7, s10, v3 -; GFX9-NEXT: v_mul_lo_u32 v9, s11, v3 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc -; GFX9-NEXT: v_mul_lo_u32 v2, s10, v3 -; GFX9-NEXT: v_add_u32_e32 v7, v7, v8 -; GFX9-NEXT: v_add_u32_e32 v7, v7, v9 -; GFX9-NEXT: v_mul_lo_u32 v8, v3, v7 -; GFX9-NEXT: v_mul_hi_u32 v9, v3, v2 -; GFX9-NEXT: v_mul_hi_u32 v10, v3, v7 -; GFX9-NEXT: v_mul_hi_u32 v11, v4, v7 -; GFX9-NEXT: v_mul_lo_u32 v7, v4, v7 +; GFX9-NEXT: v_mul_hi_u32 v4, s10, v2 +; GFX9-NEXT: v_mul_lo_u32 v8, s10, v3 +; GFX9-NEXT: v_mul_lo_u32 v9, s11, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc +; GFX9-NEXT: v_mul_lo_u32 v7, s10, v2 +; GFX9-NEXT: v_add_u32_e32 v4, v4, v8 +; GFX9-NEXT: v_add_u32_e32 v4, v4, v9 +; GFX9-NEXT: v_mul_lo_u32 v8, v2, v4 +; GFX9-NEXT: v_mul_hi_u32 v9, v2, v7 +; GFX9-NEXT: v_mul_hi_u32 v10, v2, v4 +; GFX9-NEXT: v_mul_hi_u32 v11, v3, v4 +; GFX9-NEXT: v_mul_lo_u32 v4, v3, v4 ; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v9, v8 ; GFX9-NEXT: v_addc_co_u32_e32 v9, vcc, 0, v10, vcc -; GFX9-NEXT: v_mul_lo_u32 v10, v4, v2 -; GFX9-NEXT: v_mul_hi_u32 v2, v4, v2 +; GFX9-NEXT: v_mul_lo_u32 v10, v3, v7 +; GFX9-NEXT: v_mul_hi_u32 v7, v3, v7 ; GFX9-NEXT: v_xor_b32_e32 v0, s4, v0 ; GFX9-NEXT: v_xor_b32_e32 v1, s5, v1 ; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v8, v10 -; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, v9, v2, vcc +; GFX9-NEXT: v_addc_co_u32_e32 v7, vcc, v9, v7, vcc ; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, v11, v6, vcc -; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v7 -; GFX9-NEXT: v_add_co_u32_e64 v2, s[0:1], v3, v2 +; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v7, v4 +; GFX9-NEXT: v_add_co_u32_e64 v2, s[0:1], v2, v4 ; GFX9-NEXT: v_addc_co_u32_e32 v7, vcc, v5, v8, vcc -; GFX9-NEXT: v_addc_co_u32_e64 v3, vcc, v4, v7, s[0:1] -; GFX9-NEXT: v_mul_lo_u32 v8, s10, v3 +; GFX9-NEXT: v_addc_co_u32_e64 v4, vcc, v3, v7, s[0:1] +; GFX9-NEXT: v_mul_lo_u32 v8, s10, v4 ; GFX9-NEXT: v_mul_hi_u32 v9, s10, v2 ; 
GFX9-NEXT: v_mul_lo_u32 v10, s11, v2 ; GFX9-NEXT: v_mul_lo_u32 v11, s10, v2 @@ -9581,25 +9563,25 @@ ; GFX9-NEXT: v_mul_lo_u32 v12, v2, v8 ; GFX9-NEXT: v_mul_hi_u32 v13, v2, v11 ; GFX9-NEXT: v_mul_hi_u32 v14, v2, v8 -; GFX9-NEXT: v_mul_hi_u32 v10, v3, v11 -; GFX9-NEXT: v_mul_lo_u32 v11, v3, v11 +; GFX9-NEXT: v_mul_hi_u32 v10, v4, v11 +; GFX9-NEXT: v_mul_lo_u32 v11, v4, v11 ; GFX9-NEXT: v_add_co_u32_e32 v12, vcc, v13, v12 -; GFX9-NEXT: v_mul_hi_u32 v9, v3, v8 +; GFX9-NEXT: v_mul_hi_u32 v9, v4, v8 ; GFX9-NEXT: v_addc_co_u32_e32 v13, vcc, 0, v14, vcc -; GFX9-NEXT: v_mul_lo_u32 v3, v3, v8 +; GFX9-NEXT: v_mul_lo_u32 v4, v4, v8 ; GFX9-NEXT: v_add_co_u32_e32 v11, vcc, v12, v11 ; GFX9-NEXT: v_addc_co_u32_e32 v10, vcc, v13, v10, vcc ; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, v9, v6, vcc -; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v10, v3 +; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v10, v4 ; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, v5, v8, vcc -; GFX9-NEXT: v_add_u32_e32 v4, v4, v7 -; GFX9-NEXT: v_addc_co_u32_e64 v4, vcc, v4, v8, s[0:1] +; GFX9-NEXT: v_add_u32_e32 v3, v3, v7 +; GFX9-NEXT: v_addc_co_u32_e64 v3, vcc, v3, v8, s[0:1] ; GFX9-NEXT: s_add_u32 s0, s6, s10 -; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v3 +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v4 ; GFX9-NEXT: s_mov_b32 s11, s10 ; GFX9-NEXT: s_addc_u32 s1, s7, s10 ; GFX9-NEXT: s_xor_b64 s[6:7], s[0:1], s[10:11] -; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v4, vcc +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc ; GFX9-NEXT: v_mul_lo_u32 v4, s6, v3 ; GFX9-NEXT: v_mul_hi_u32 v7, s6, v2 ; GFX9-NEXT: v_mul_hi_u32 v9, s6, v3 @@ -9635,25 +9617,22 @@ ; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, -1, s[0:1] ; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s9, v7 ; GFX9-NEXT: v_cndmask_b32_e64 v7, v9, v8, s[0:1] -; GFX9-NEXT: v_add_co_u32_e64 v8, s[0:1], 2, v2 -; GFX9-NEXT: v_addc_co_u32_e64 v9, s[0:1], 0, v3, s[0:1] -; GFX9-NEXT: v_add_co_u32_e64 v10, s[0:1], 1, v2 -; GFX9-NEXT: v_addc_co_u32_e64 v11, s[0:1], 0, v3, s[0:1] -; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v7 -; GFX9-NEXT: v_cndmask_b32_e64 v7, v11, v9, s[0:1] ; GFX9-NEXT: v_mov_b32_e32 v9, s7 ; GFX9-NEXT: v_subb_co_u32_e32 v4, vcc, v9, v4, vcc ; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s9, v4 +; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v7 ; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, -1, vcc ; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s8, v5 +; GFX9-NEXT: v_cndmask_b32_e64 v7, 1, 2, s[0:1] ; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, s9, v4 +; GFX9-NEXT: v_add_co_u32_e64 v7, s[0:1], v2, v7 ; GFX9-NEXT: v_cndmask_b32_e32 v4, v9, v5, vcc +; GFX9-NEXT: v_addc_co_u32_e64 v8, s[0:1], 0, v3, s[0:1] ; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4 -; GFX9-NEXT: v_cndmask_b32_e64 v4, v10, v8, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc ; GFX9-NEXT: s_xor_b64 s[0:1], s[10:11], s[12:13] -; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc ; GFX9-NEXT: v_xor_b32_e32 v2, s0, v2 ; GFX9-NEXT: v_xor_b32_e32 v3, s1, v3 ; GFX9-NEXT: v_mov_b32_e32 v4, s1 @@ -9810,6 +9789,9 @@ ; GFX9-NEXT: v_mul_hi_u32 v3, v0, s8 ; GFX9-NEXT: v_mul_lo_u32 v2, v1, s8 ; GFX9-NEXT: v_mul_lo_u32 v4, v0, s8 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_ashr_i32 s0, s7, 31 +; GFX9-NEXT: s_mov_b32 s1, s0 ; GFX9-NEXT: v_add_u32_e32 v2, v3, v2 ; GFX9-NEXT: v_sub_u32_e32 v2, v2, v0 ; GFX9-NEXT: v_mul_hi_u32 v3, v0, v4 @@ -9849,59 +9831,56 @@ ; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v8, v2 ; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v7, v4, vcc ; GFX9-NEXT: 
v_addc_co_u32_e64 v1, vcc, v1, v4, s[2:3] -; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_ashr_i32 s2, s7, 31 -; GFX9-NEXT: s_add_u32 s0, s6, s2 +; GFX9-NEXT: s_add_u32 s2, s6, s0 +; GFX9-NEXT: s_addc_u32 s3, s7, s0 ; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2 -; GFX9-NEXT: s_mov_b32 s3, s2 -; GFX9-NEXT: s_addc_u32 s1, s7, s2 -; GFX9-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3] +; GFX9-NEXT: s_xor_b64 s[2:3], s[2:3], s[0:1] ; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc -; GFX9-NEXT: v_mul_lo_u32 v2, s0, v1 -; GFX9-NEXT: v_mul_hi_u32 v3, s0, v0 -; GFX9-NEXT: v_mul_hi_u32 v4, s0, v1 -; GFX9-NEXT: v_mul_hi_u32 v6, s1, v1 -; GFX9-NEXT: v_mul_lo_u32 v1, s1, v1 +; GFX9-NEXT: v_mul_lo_u32 v2, s2, v1 +; GFX9-NEXT: v_mul_hi_u32 v3, s2, v0 +; GFX9-NEXT: v_mul_hi_u32 v4, s2, v1 +; GFX9-NEXT: v_mul_hi_u32 v6, s3, v1 +; GFX9-NEXT: v_mul_lo_u32 v1, s3, v1 ; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v3, v2 ; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v7, v4, vcc -; GFX9-NEXT: v_mul_lo_u32 v4, s1, v0 -; GFX9-NEXT: v_mul_hi_u32 v0, s1, v0 -; GFX9-NEXT: s_mov_b32 s3, 0x12d8fb +; GFX9-NEXT: v_mul_lo_u32 v4, s3, v0 +; GFX9-NEXT: v_mul_hi_u32 v0, s3, v0 +; GFX9-NEXT: s_mov_b32 s1, 0x12d8fb ; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v4 ; GFX9-NEXT: v_addc_co_u32_e32 v0, vcc, v3, v0, vcc ; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, v6, v5, vcc ; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v1 ; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v7, v2, vcc -; GFX9-NEXT: v_mul_hi_u32 v2, v0, s3 -; GFX9-NEXT: v_mul_lo_u32 v1, v1, s3 -; GFX9-NEXT: v_mul_lo_u32 v0, v0, s3 +; GFX9-NEXT: v_mul_hi_u32 v2, v0, s1 +; GFX9-NEXT: v_mul_lo_u32 v1, v1, s1 +; GFX9-NEXT: v_mul_lo_u32 v0, v0, s1 ; GFX9-NEXT: v_add_u32_e32 v1, v2, v1 -; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, s0, v0 -; GFX9-NEXT: v_mov_b32_e32 v2, s1 +; GFX9-NEXT: v_mov_b32_e32 v2, s3 +; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, s2, v0 ; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v2, v1, vcc -; GFX9-NEXT: v_subrev_co_u32_e32 v2, vcc, s3, v0 +; GFX9-NEXT: v_subrev_co_u32_e32 v2, vcc, s1, v0 ; GFX9-NEXT: v_subbrev_co_u32_e32 v3, vcc, 0, v1, vcc -; GFX9-NEXT: v_subrev_co_u32_e32 v4, vcc, s3, v2 +; GFX9-NEXT: v_subrev_co_u32_e32 v4, vcc, s1, v2 ; GFX9-NEXT: v_subbrev_co_u32_e32 v6, vcc, 0, v3, vcc -; GFX9-NEXT: s_mov_b32 s0, 0x12d8fa -; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s0, v2 +; GFX9-NEXT: s_mov_b32 s1, 0x12d8fa +; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s1, v2 ; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, -1, vcc ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3 ; GFX9-NEXT: v_cndmask_b32_e32 v7, -1, v7, vcc ; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7 -; GFX9-NEXT: v_cmp_lt_u32_e64 s[0:1], s0, v0 -; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v1 -; GFX9-NEXT: v_cndmask_b32_e64 v6, -1, v6, s[0:1] -; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v6 ; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v3, s[0:1] -; GFX9-NEXT: v_xor_b32_e32 v0, s2, v0 -; GFX9-NEXT: v_xor_b32_e32 v1, s2, v1 -; GFX9-NEXT: v_mov_b32_e32 v2, s2 -; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, s2, v0 +; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc +; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, s1, v0 +; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc +; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc +; GFX9-NEXT: v_xor_b32_e32 v0, s0, 
v0 +; GFX9-NEXT: v_xor_b32_e32 v1, s0, v1 +; GFX9-NEXT: v_mov_b32_e32 v2, s0 +; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, s0, v0 ; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc ; GFX9-NEXT: global_store_dwordx2 v5, v[0:1], s[4:5] ; GFX9-NEXT: s_endpgm @@ -10216,19 +10195,19 @@ ; GFX9-NEXT: v_cndmask_b32_e64 v7, v7, v8, s[2:3] ; GFX9-NEXT: v_subbrev_co_u32_e64 v3, s[0:1], 0, v3, s[0:1] ; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v7 -; GFX9-NEXT: v_cndmask_b32_e64 v3, v6, v3, s[0:1] -; GFX9-NEXT: v_mov_b32_e32 v6, s7 -; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v6, v1, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v4, v5, v4, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v5, s7 +; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v5, v1, vcc ; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s9, v1 -; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc ; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s8, v0 -; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, -1, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v3, v6, v3, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, s9, v1 -; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6 +; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v6, vcc +; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc ; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v3, v5, v4, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc ; GFX9-NEXT: v_xor_b32_e32 v0, s10, v0 ; GFX9-NEXT: v_xor_b32_e32 v1, s10, v1 ; GFX9-NEXT: v_mov_b32_e32 v3, s10 @@ -10704,37 +10683,37 @@ ; GFX9-NEXT: v_subrev_co_u32_e64 v4, s[0:1], s12, v0 ; GFX9-NEXT: v_subbrev_co_u32_e64 v7, s[2:3], 0, v2, s[0:1] ; GFX9-NEXT: v_cmp_le_u32_e64 s[2:3], s13, v7 +; GFX9-NEXT: v_subb_co_u32_e64 v2, s[0:1], v2, v3, s[0:1] ; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, -1, s[2:3] ; GFX9-NEXT: v_cmp_le_u32_e64 s[2:3], s12, v4 +; GFX9-NEXT: v_subrev_co_u32_e64 v3, s[0:1], s12, v4 ; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[2:3] ; GFX9-NEXT: v_cmp_eq_u32_e64 s[2:3], s13, v7 ; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, v9, s[2:3] -; GFX9-NEXT: s_ashr_i32 s2, s11, 31 -; GFX9-NEXT: v_subb_co_u32_e64 v2, s[0:1], v2, v3, s[0:1] -; GFX9-NEXT: s_add_u32 s10, s10, s2 -; GFX9-NEXT: v_subrev_co_u32_e64 v3, s[0:1], s12, v4 -; GFX9-NEXT: s_mov_b32 s3, s2 -; GFX9-NEXT: s_addc_u32 s11, s11, s2 -; GFX9-NEXT: s_xor_b64 s[10:11], s[10:11], s[2:3] ; GFX9-NEXT: v_subbrev_co_u32_e64 v2, s[0:1], 0, v2, s[0:1] ; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v8 -; GFX9-NEXT: v_cvt_f32_u32_e32 v8, s10 -; GFX9-NEXT: v_cvt_f32_u32_e32 v9, s11 +; GFX9-NEXT: v_cndmask_b32_e64 v3, v4, v3, s[0:1] ; GFX9-NEXT: v_cndmask_b32_e64 v2, v7, v2, s[0:1] -; GFX9-NEXT: v_mov_b32_e32 v7, s15 -; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v7, v1, vcc -; GFX9-NEXT: v_mac_f32_e32 v8, s16, v9 +; GFX9-NEXT: s_ashr_i32 s0, s11, 31 +; GFX9-NEXT: s_add_u32 s2, s10, s0 +; GFX9-NEXT: s_mov_b32 s1, s0 +; GFX9-NEXT: s_addc_u32 s3, s11, s0 +; GFX9-NEXT: s_xor_b64 s[10:11], s[2:3], s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v4, s15 +; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v4, v1, vcc +; GFX9-NEXT: v_cvt_f32_u32_e32 v4, s10 +; GFX9-NEXT: v_cvt_f32_u32_e32 v7, s11 ; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s13, v1 -; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, -1, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc ; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s12, v0 -; GFX9-NEXT: v_rcp_f32_e32 v8, v8 -; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, -1, vcc +; GFX9-NEXT: v_mac_f32_e32 v4, s16, v7 +; GFX9-NEXT: v_rcp_f32_e32 v4, v4 +; GFX9-NEXT: v_cndmask_b32_e64 
v9, 0, -1, vcc ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, s13, v1 -; GFX9-NEXT: v_cndmask_b32_e32 v7, v7, v10, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v7, v8, v9, vcc ; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v2, v4, v3, s[0:1] -; GFX9-NEXT: v_mul_f32_e32 v3, s17, v8 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc +; GFX9-NEXT: v_mul_f32_e32 v3, s17, v4 ; GFX9-NEXT: v_mul_f32_e32 v4, s18, v3 ; GFX9-NEXT: v_trunc_f32_e32 v4, v4 ; GFX9-NEXT: v_mac_f32_e32 v3, s19, v4 @@ -10745,7 +10724,7 @@ ; GFX9-NEXT: v_mul_hi_u32 v7, s2, v3 ; GFX9-NEXT: v_mul_lo_u32 v8, s2, v4 ; GFX9-NEXT: v_mul_lo_u32 v9, s3, v3 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc ; GFX9-NEXT: v_mul_lo_u32 v2, s2, v3 ; GFX9-NEXT: v_add_u32_e32 v7, v7, v8 ; GFX9-NEXT: v_add_u32_e32 v7, v7, v9 @@ -10835,19 +10814,19 @@ ; GFX9-NEXT: v_cndmask_b32_e64 v9, v9, v10, s[2:3] ; GFX9-NEXT: v_subbrev_co_u32_e64 v4, s[0:1], 0, v4, s[0:1] ; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v9 -; GFX9-NEXT: v_cndmask_b32_e64 v4, v8, v4, s[0:1] -; GFX9-NEXT: v_mov_b32_e32 v8, s7 -; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v8, v3, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v5, v7, v5, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v7, s7 +; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v7, v3, vcc ; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s11, v3 -; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, -1, vcc ; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s10, v2 -; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, -1, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v4, v8, v4, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, s11, v3 -; GFX9-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8 +; GFX9-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc +; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7 +; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc ; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v4, v7, v5, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc ; GFX9-NEXT: v_xor_b32_e32 v2, s12, v2 ; GFX9-NEXT: v_xor_b32_e32 v3, s12, v3 ; GFX9-NEXT: v_mov_b32_e32 v4, s12 diff --git a/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll b/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll --- a/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll +++ b/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll @@ -159,7 +159,7 @@ } ; GCN-LABEL: {{^}}sdiv_constant_sel_constants_i64: -; GCN: s_cselect_b32 s{{[0-9]+}}, 0, 5 +; GCN: s_cselect_b64 s[{{[0-9]+}}:{{[0-9]+}}], 0, 5 define amdgpu_kernel void @sdiv_constant_sel_constants_i64(i64 addrspace(1)* %p, i1 %cond) { %sel = select i1 %cond, i64 121, i64 23 %bo = sdiv i64 120, %sel @@ -177,7 +177,7 @@ } ; GCN-LABEL: {{^}}udiv_constant_sel_constants_i64: -; GCN: s_cselect_b32 s{{[0-9]+}}, 0, 5 +; GCN: s_cselect_b64 s[{{[0-9]+}}:{{[0-9]+}}], 0, 5 define amdgpu_kernel void @udiv_constant_sel_constants_i64(i64 addrspace(1)* %p, i1 %cond) { %sel = select i1 %cond, i64 -4, i64 23 %bo = udiv i64 120, %sel @@ -186,7 +186,7 @@ } ; GCN-LABEL: {{^}}srem_constant_sel_constants: -; GCN: s_cselect_b32 s{{[0-9]+}}, 33, 3 +; GCN: s_cselect_b64 s[{{[0-9]+}}:{{[0-9]+}}], 33, 3 define amdgpu_kernel void @srem_constant_sel_constants(i64 addrspace(1)* %p, i1 %cond) { %sel = select i1 %cond, i64 34, i64 15 %bo = srem i64 33, %sel @@ -195,7 +195,7 @@ } ; GCN-LABEL: {{^}}urem_constant_sel_constants: -; GCN: s_cselect_b32 s{{[0-9]+}}, 33, 3 +; GCN: s_cselect_b64 s[{{[0-9]+}}:{{[0-9]+}}], 33, 3 define amdgpu_kernel 
void @urem_constant_sel_constants(i64 addrspace(1)* %p, i1 %cond) { %sel = select i1 %cond, i64 34, i64 15 %bo = urem i64 33, %sel diff --git a/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll b/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll --- a/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll +++ b/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll @@ -38,16 +38,23 @@ ; GCN-LABEL: {{^}}double4_extelt: ; GCN-NOT: buffer_ +; GCN-DAG: s_mov_b32 s[[L0LO:[0-9]+]], 0x47ae147b +; GCN-DAG: s_mov_b32 s[[L0HI:[0-9]+]], 0x3f847ae1 +; GCN-DAG: s_mov_b32 s[[L1LO:[0-9]+]], 0xc28f5c29 +; GCN-DAG: s_mov_b32 s[[L1HI:[0-9]+]], 0x3ff028f5 ; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1 -; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], -1, 0 -; GCN-DAG: s_cmp_eq_u32 [[IDX]], 2 -; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], -1, 0 -; GCN-DAG: s_cmp_eq_u32 [[IDX]], 3 -; GCN-DAG: s_cselect_b64 [[C3:[^,]+]], -1, 0 -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, {{[^,]+}}, [[C1]] -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, {{[^,]+}}, [[C2]] -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, {{[^,]+}}, [[C3]] -; GCN: store_dwordx2 v[{{[0-9:]+}}] +; GCN: s_cselect_b64 s{{\[}}[[T0LO:[0-9]+]]:[[T0HI:[0-9]+]]{{\]}}, s{{\[}}[[L1LO]]:[[L1HI]]{{\]}}, s{{\[}}[[L0LO]]:[[L0HI]]{{\]}} +; GCN-DAG: s_mov_b32 s[[L2LO:[0-9]+]], 0xe147ae14 +; GCN-DAG: s_mov_b32 s[[L2HI:[0-9]+]], 0x4000147a +; GCN-DAG: s_cmp_lg_u32 [[IDX]], 2 +; GCN: s_cselect_b64 s{{\[}}[[T1LO:[0-9]+]]:[[T1HI:[0-9]+]]{{\]}}, s{{\[}}[[T0LO]]:[[T0HI]]{{\]}}, s{{\[}}[[L2LO]]:[[L2HI]]{{\]}} +; GCN-DAG: s_mov_b32 s[[L3LO:[0-9]+]], 0x70a3d70a +; GCN-DAG: s_mov_b32 s[[L3HI:[0-9]+]], 0x40100a3d +; GCN-DAG: s_cmp_lg_u32 [[IDX]], 3 +; GCN: s_cselect_b64 s{{\[}}[[T2LO:[0-9]+]]:[[T2HI:[0-9]+]]{{\]}}, s{{\[}}[[T1LO]]:[[T1HI]]{{\]}}, s{{\[}}[[L3LO]]:[[L3HI]]{{\]}} +; GCN-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[T2LO]] +; GCN-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[T2HI]] +; GCN: store_dwordx2 v[{{[0-9:]+}}], v{{\[}}[[V_LO]]:[[V_HI]]{{\]}} define amdgpu_kernel void @double4_extelt(double addrspace(1)* %out, i32 %sel) { entry: %ext = extractelement <4 x double> , i32 %sel @@ -57,19 +64,27 @@ ; GCN-LABEL: {{^}}double5_extelt: ; GCN-NOT: buffer_ +; GCN-DAG: s_mov_b32 s[[L0LO:[0-9]+]], 0x47ae147b +; GCN-DAG: s_mov_b32 s[[L0HI:[0-9]+]], 0x3f847ae1 +; GCN-DAG: s_mov_b32 s[[L1LO:[0-9]+]], 0xc28f5c29 +; GCN-DAG: s_mov_b32 s[[L1HI:[0-9]+]], 0x3ff028f5 ; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1 -; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], -1, 0 -; GCN-DAG: s_cmp_eq_u32 [[IDX]], 2 -; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], -1, 0 -; GCN-DAG: s_cmp_eq_u32 [[IDX]], 3 -; GCN-DAG: s_cselect_b64 [[C3:[^,]+]], -1, 0 -; GCN-DAG: s_cmp_eq_u32 [[IDX]], 4 -; GCN-DAG: s_cselect_b64 [[C4:[^,]+]], -1, 0 -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, {{[^,]+}}, [[C1]] -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, {{[^,]+}}, [[C2]] -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, {{[^,]+}}, [[C3]] -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, {{[^,]+}}, [[C4]] -; GCN: store_dwordx2 v[{{[0-9:]+}}] +; GCN: s_cselect_b64 s{{\[}}[[T0LO:[0-9]+]]:[[T0HI:[0-9]+]]{{\]}}, s{{\[}}[[L1LO]]:[[L1HI]]{{\]}}, s{{\[}}[[L0LO]]:[[L0HI]]{{\]}} +; GCN-DAG: s_mov_b32 s[[L2LO:[0-9]+]], 0xe147ae14 +; GCN-DAG: s_mov_b32 s[[L2HI:[0-9]+]], 0x4000147a +; GCN-DAG: s_cmp_lg_u32 [[IDX]], 2 +; GCN: s_cselect_b64 s{{\[}}[[T1LO:[0-9]+]]:[[T1HI:[0-9]+]]{{\]}}, s{{\[}}[[T0LO]]:[[T0HI]]{{\]}}, s{{\[}}[[L2LO]]:[[L2HI]]{{\]}} +; GCN-DAG: s_mov_b32 s[[L3LO:[0-9]+]], 
0x70a3d70a +; GCN-DAG: s_mov_b32 s[[L3HI:[0-9]+]], 0x40100a3d +; GCN-DAG: s_cmp_lg_u32 [[IDX]], 3 +; GCN: s_cselect_b64 s{{\[}}[[T2LO:[0-9]+]]:[[T2HI:[0-9]+]]{{\]}}, s{{\[}}[[T1LO]]:[[T1HI]]{{\]}}, s{{\[}}[[L3LO]]:[[L3HI]]{{\]}} +; Double literals 5.01 and 4.01 share the same low 32 bits. +; GCN-DAG: s_mov_b32 s[[L4HI:[0-9]+]], 0x40140a3d +; GCN-DAG: s_cmp_lg_u32 [[IDX]], 4 +; GCN: s_cselect_b64 s{{\[}}[[T3LO:[0-9]+]]:[[T3HI:[0-9]+]]{{\]}}, s{{\[}}[[T2LO]]:[[T2HI]]{{\]}}, s{{\[}}[[L3LO]]:[[L4HI]]{{\]}} +; GCN-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[T3LO]] +; GCN-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[T3HI]] +; GCN: store_dwordx2 v[{{[0-9:]+}}], v{{\[}}[[V_LO]]:[[V_HI]]{{\]}} define amdgpu_kernel void @double5_extelt(double addrspace(1)* %out, i32 %sel) { entry: %ext = extractelement <5 x double> , i32 %sel @@ -107,11 +122,15 @@ ; GCN-LABEL: {{^}}double2_extelt: ; GCN-NOT: buffer_ +; GCN-DAG: s_mov_b32 s[[L0LO:[0-9]+]], 0x47ae147b +; GCN-DAG: s_mov_b32 s[[L0HI:[0-9]+]], 0x3f847ae1 +; GCN-DAG: s_mov_b32 s[[L1LO:[0-9]+]], 0xc28f5c29 +; GCN-DAG: s_mov_b32 s[[L1HI:[0-9]+]], 0x3ff028f5 ; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1 -; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], -1, 0 -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, {{[^,]+}}, [[C1]] -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, {{[^,]+}}, [[C1]] -; GCN: store_dwordx2 v[{{[0-9:]+}}] +; GCN: s_cselect_b64 s{{\[}}[[T0LO:[0-9]+]]:[[T0HI:[0-9]+]]{{\]}}, s{{\[}}[[L1LO]]:[[L1HI]]{{\]}}, s{{\[}}[[L0LO]]:[[L0HI]]{{\]}} +; GCN-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[T0LO]] +; GCN-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[T0HI]] +; GCN: store_dwordx2 v[{{[0-9:]+}}], v{{\[}}[[V_LO]]:[[V_HI]]{{\]}} define amdgpu_kernel void @double2_extelt(double addrspace(1)* %out, i32 %sel) { entry: %ext = extractelement <2 x double> , i32 %sel diff --git a/llvm/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll b/llvm/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll --- a/llvm/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll +++ b/llvm/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll @@ -1,5 +1,5 @@ -; RUN: llc -march=amdgcn -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s -; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s +; RUN: llc -march=amdgcn -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s +; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s ; GCN-LABEL: {{^}}extract_vector_elt_v3f64_2: ; GCN: buffer_load_dwordx4 @@ -14,15 +14,22 @@ ; GCN-LABEL: {{^}}dyn_extract_vector_elt_v3f64: ; GCN-NOT: buffer_load -; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1 -; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], -1, 0 -; GCN-DAG: s_cmp_eq_u32 [[IDX]], 2 -; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], -1, 0 -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]] -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]] -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C2]] -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C2]] -; GCN: store_dwordx2 v[{{[0-9:]+}}] +; SI-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1 +; SI-DAG: s_cselect_b64 [[C1:[^,]+]], -1, 0 +; SI-DAG: s_cmp_eq_u32 [[IDX]], 2 +; SI-DAG: s_cselect_b64 [[C2:[^,]+]], -1, 0 +; SI-DAG: v_cndmask_b32_e{{32|64}} 
v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]] +; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]] +; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C2]] +; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C2]] +; SI: store_dwordx2 v[{{[0-9:]+}}] +; VI: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1 +; VI: s_cselect_b64 s{{\[}}[[T0LO:[0-9]+]]:[[T0HI:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}] +; VI: s_cmp_eq_u32 [[IDX:s[0-9]+]], 2 +; VI: s_cselect_b64 s{{\[}}[[T1LO:[0-9]+]]:[[T1HI:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], s{{\[}}[[T0LO]]:[[T0HI]]{{\]}} +; VI-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[T1LO]] +; VI-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[T1HI]] +; VI: store_dwordx2 v{{\[}}[[V_LO]]:[[V_HI]]{{\]}} define amdgpu_kernel void @dyn_extract_vector_elt_v3f64(double addrspace(1)* %out, <3 x double> %foo, i32 %elt) #0 { %dynelt = extractelement <3 x double> %foo, i32 %elt store volatile double %dynelt, double addrspace(1)* %out @@ -31,19 +38,28 @@ ; GCN-LABEL: {{^}}dyn_extract_vector_elt_v4f64: ; GCN-NOT: buffer_load -; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1 -; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], -1, 0 -; GCN-DAG: s_cmp_eq_u32 [[IDX]], 2 -; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], -1, 0 -; GCN-DAG: s_cmp_eq_u32 [[IDX]], 3 -; GCN-DAG: s_cselect_b64 [[C3:[^,]+]], -1, 0 -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]] -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]] -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C2]] -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C2]] -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C3]] -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C3]] -; GCN: store_dwordx2 v[{{[0-9:]+}}] +; SI-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1 +; SI-DAG: s_cselect_b64 [[C1:[^,]+]], -1, 0 +; SI-DAG: s_cmp_eq_u32 [[IDX]], 2 +; SI-DAG: s_cselect_b64 [[C2:[^,]+]], -1, 0 +; SI-DAG: s_cmp_eq_u32 [[IDX]], 3 +; SI-DAG: s_cselect_b64 [[C3:[^,]+]], -1, 0 +; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]] +; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]] +; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C2]] +; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C2]] +; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C3]] +; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C3]] +; SI: store_dwordx2 v[{{[0-9:]+}}] +; VI: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1 +; VI: s_cselect_b64 s{{\[}}[[T0LO:[0-9]+]]:[[T0HI:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}] +; VI: s_cmp_eq_u32 [[IDX:s[0-9]+]], 2 +; VI: s_cselect_b64 s{{\[}}[[T1LO:[0-9]+]]:[[T1HI:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], s{{\[}}[[T0LO]]:[[T0HI]]{{\]}} +; VI: s_cmp_eq_u32 [[IDX:s[0-9]+]], 3 +; VI: s_cselect_b64 s{{\[}}[[T2LO:[0-9]+]]:[[T2HI:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], s{{\[}}[[T1LO]]:[[T1HI]]{{\]}} +; VI-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[T2LO]] +; VI-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[T2HI]] +; VI: store_dwordx2 v{{\[}}[[V_LO]]:[[V_HI]]{{\]}} define amdgpu_kernel void @dyn_extract_vector_elt_v4f64(double addrspace(1)* %out, <4 x double> %foo, i32 %elt) #0 { %dynelt = extractelement <4 x double> %foo, i32 %elt store volatile double %dynelt, double addrspace(1)* %out diff --git 
a/llvm/test/CodeGen/AMDGPU/extract_vector_elt-i64.ll b/llvm/test/CodeGen/AMDGPU/extract_vector_elt-i64.ll --- a/llvm/test/CodeGen/AMDGPU/extract_vector_elt-i64.ll +++ b/llvm/test/CodeGen/AMDGPU/extract_vector_elt-i64.ll @@ -1,5 +1,5 @@ -; RUN: llc -march=amdgcn -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s -; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s +; RUN: llc -march=amdgcn -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI %s +; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI %s ; How the replacement of i64 stores with v2i32 stores resulted in ; breaking other users of the bitcast if they already existed @@ -32,10 +32,14 @@ ; GCN-LABEL: {{^}}dyn_extract_vector_elt_v2i64: ; GCN-NOT: buffer_load ; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1 -; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], -1, 0 -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]] -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]] -; GCN: store_dwordx2 v[{{[0-9:]+}}] +; SI-DAG: s_cselect_b64 [[C1:[^,]+]], -1, 0 +; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]] +; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]] +; SI: store_dwordx2 v[{{[0-9:]+}}] +; VI: s_cselect_b64 s{{\[}}[[S_LO:[0-9]+]]:[[S_HI:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}] +; VI-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[S_LO]] +; VI-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[S_HI]] +; VI: store_dwordx2 v{{\[}}[[V_LO]]:[[V_HI]]{{\]}} define amdgpu_kernel void @dyn_extract_vector_elt_v2i64(i64 addrspace(1)* %out, <2 x i64> %foo, i32 %elt) #0 { %dynelt = extractelement <2 x i64> %foo, i32 %elt store volatile i64 %dynelt, i64 addrspace(1)* %out @@ -59,16 +63,23 @@ } ; GCN-LABEL: {{^}}dyn_extract_vector_elt_v3i64: -; GCN-NOT: buffer_load -; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1 -; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], -1, 0 -; GCN-DAG: s_cmp_eq_u32 [[IDX]], 2 -; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], -1, 0 -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]] -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C2]] -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]] -; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C2]] -; GCN: store_dwordx2 v[{{[0-9:]+}}] +; SI-NOT: buffer_load +; SI-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1 +; SI-DAG: s_cselect_b64 [[C1:[^,]+]], -1, 0 +; SI-DAG: s_cmp_eq_u32 [[IDX]], 2 +; SI-DAG: s_cselect_b64 [[C2:[^,]+]], -1, 0 +; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]] +; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C2]] +; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]] +; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C2]] +; SI: store_dwordx2 v[{{[0-9:]+}}] +; VI: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1 +; VI: s_cselect_b64 s{{\[}}[[T0LO:[0-9]+]]:[[T0HI:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}] +; VI: s_cmp_eq_u32 [[IDX:s[0-9]+]], 2 +; VI: s_cselect_b64 s{{\[}}[[T1LO:[0-9]+]]:[[T1HI:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], s{{\[}}[[T0LO]]:[[T0HI]]{{\]}} +; VI-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[T1LO]] +; VI-DAG: v_mov_b32_e32 
+; VI: store_dwordx2 v{{\[}}[[V_LO]]:[[V_HI]]{{\]}}
 define amdgpu_kernel void @dyn_extract_vector_elt_v3i64(i64 addrspace(1)* %out, <3 x i64> %foo, i32 %elt) #0 {
   %dynelt = extractelement <3 x i64> %foo, i32 %elt
   store volatile i64 %dynelt, i64 addrspace(1)* %out
@@ -77,19 +88,28 @@
 ; GCN-LABEL: {{^}}dyn_extract_vector_elt_v4i64:
 ; GCN-NOT: buffer_load
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_eq_u32 [[IDX]], 2
-; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_eq_u32 [[IDX]], 3
-; GCN-DAG: s_cselect_b64 [[C3:[^,]+]], -1, 0
-; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C2]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C2]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C3]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C3]]
-; GCN: store_dwordx2 v[{{[0-9:]+}}]
+; SI-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
+; SI-DAG: s_cselect_b64 [[C1:[^,]+]], -1, 0
+; SI-DAG: s_cmp_eq_u32 [[IDX]], 2
+; SI-DAG: s_cselect_b64 [[C2:[^,]+]], -1, 0
+; SI-DAG: s_cmp_eq_u32 [[IDX]], 3
+; SI-DAG: s_cselect_b64 [[C3:[^,]+]], -1, 0
+; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]]
+; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C2]]
+; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]]
+; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C2]]
+; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C3]]
+; SI-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C3]]
+; SI: store_dwordx2 v[{{[0-9:]+}}]
+; VI: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
+; VI: s_cselect_b64 s{{\[}}[[T0LO:[0-9]+]]:[[T0HI:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
+; VI: s_cmp_eq_u32 [[IDX]], 2
+; VI: s_cselect_b64 s{{\[}}[[T1LO:[0-9]+]]:[[T1HI:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], s{{\[}}[[T0LO]]:[[T0HI]]{{\]}}
+; VI: s_cmp_eq_u32 [[IDX]], 3
+; VI: s_cselect_b64 s{{\[}}[[T2LO:[0-9]+]]:[[T2HI:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], s{{\[}}[[T1LO]]:[[T1HI]]{{\]}}
+; VI-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[T2LO]]
+; VI-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[T2HI]]
+; VI: store_dwordx2 v{{\[}}[[V_LO]]:[[V_HI]]{{\]}}
 define amdgpu_kernel void @dyn_extract_vector_elt_v4i64(i64 addrspace(1)* %out, <4 x i64> %foo, i32 %elt) #0 {
   %dynelt = extractelement <4 x i64> %foo, i32 %elt
   store volatile i64 %dynelt, i64 addrspace(1)* %out
diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch-init.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch-init.ll
--- a/llvm/test/CodeGen/AMDGPU/flat-scratch-init.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-scratch-init.ll
@@ -15,8 +15,8 @@
 ; GCN-NEXT: v_mov_b32_e32 v2, 0
 ; GCN-NEXT: s_lshl_b32 s0, s0, 16
 ; GCN-NEXT: v_cmp_ne_u32_e32 vcc_lo, -1, v0
-; GCN-NEXT: v_cndmask_b32_e64 v1, 0, s0, vcc_lo
 ; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc_lo
+; GCN-NEXT: v_cndmask_b32_e64 v1, 0, s0, vcc_lo
 ; GCN-NEXT: flat_store_dword v[0:1], v2
 ; GCN-NEXT: s_waitcnt_vscnt null, 0x0
 ; GCN-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
@@ -285,16 +285,18 @@
 }

 ; GCN-LABEL: {{^}}double2_inselt:
+; GCN: s_load_dwordx4 s{{\[}}[[FIRST:[0-9]+]]:[[LAST:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}]
 ; GCN-NOT: v_movrel
 ; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[CC1:[^,]+]], -1, 0
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC1]]
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 0, [[CC1]]
-; GCN-DAG: s_cmp_eq_u32 [[IDX]], 0
-; GCN-DAG: s_cselect_b64 [[CC2:[^,]+]], -1, 0
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC2]]
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 0, [[CC2]]
+; GCN: s_cmp_lg_u32 [[IDX:s[0-9]+]], 1
+; GCN: s_cselect_b64 s{{\[}}[[P0_LO:[0-9]+]]:[[P0_HI:[0-9]+]]{{\]}}, s{{\[}}{{[0-9]+}}:[[LAST]]{{\]}}, 1.0
+; GCN: s_cmp_lg_u32 [[IDX]], 0
+; GCN: s_cselect_b64 s{{\[}}[[P1_LO:[0-9]+]]:[[P1_HI:[0-9]+]]{{\]}}, s{{\[}}[[FIRST]]:{{[0-9]+}}{{\]}}, 1.0
+; GCN: v_mov_b32_e32 v[[V_FIRST:[0-9]+]], s[[P1_LO]]
+; GCN: v_mov_b32_e32 v[[V_SECOND:[0-9]+]], s[[P1_HI]]
+; GCN: v_mov_b32_e32 v[[V_THIRD:[0-9]+]], s[[P0_LO]]
+; GCN: v_mov_b32_e32 v[[V_LAST:[0-9]+]], s[[P0_HI]]
+; GCN: flat_store_dwordx4 {{v\[[0-9]+:[0-9]+\]}}, v{{\[}}[[V_FIRST]]:[[V_LAST]]{{\]}}
 define amdgpu_kernel void @double2_inselt(<2 x double> addrspace(1)* %out, <2 x double> %vec, i32 %sel) {
 entry:
   %v = insertelement <2 x double> %vec, double 1.000000e+00, i32 %sel
@@ -305,7 +307,7 @@
 ; GCN-LABEL: {{^}}double5_inselt:
 ; GCN-NOT: v_movrel
 ; GCN-NOT: buffer_
-; GCN-COUNT-10: v_cndmask_b32
+; GCN-COUNT-5: s_cselect_b64
 define amdgpu_kernel void @double5_inselt(<5 x double> addrspace(1)* %out, <5 x double> %vec, i32 %sel) {
 entry:
   %v = insertelement <5 x double> %vec, double 1.000000e+00, i32 %sel
diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
@@ -1627,25 +1627,22 @@
 ;
 ; VI-LABEL: dynamic_insertelement_v2f64:
 ; VI: ; %bb.0:
+; VI-NEXT: s_load_dword s12, s[4:5], 0x60
 ; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
 ; VI-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x30
-; VI-NEXT: s_load_dword s4, s[4:5], 0x60
-; VI-NEXT: v_mov_b32_e32 v1, 0x40200000
+; VI-NEXT: s_mov_b32 s4, 0
+; VI-NEXT: s_mov_b32 s5, 0x40200000
 ; VI-NEXT: s_mov_b32 s3, 0x1100f000
-; VI-NEXT: s_mov_b32 s2, -1
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v0, s11
-; VI-NEXT: s_cmp_eq_u32 s4, 1
-; VI-NEXT: s_cselect_b64 vcc, -1, 0
-; VI-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
-; VI-NEXT: v_mov_b32_e32 v0, s10
-; VI-NEXT: s_cmp_eq_u32 s4, 0
-; VI-NEXT: v_cndmask_b32_e64 v2, v0, 0, vcc
-; VI-NEXT: v_mov_b32_e32 v0, s9
-; VI-NEXT: s_cselect_b64 vcc, -1, 0
-; VI-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc
-; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; VI-NEXT: s_cmp_lg_u32 s12, 1
+; VI-NEXT: s_cselect_b64 s[6:7], s[10:11], s[4:5]
+; VI-NEXT: s_cmp_lg_u32 s12, 0
+; VI-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: v_mov_b32_e32 v1, s5
+; VI-NEXT: v_mov_b32_e32 v2, s6
+; VI-NEXT: v_mov_b32_e32 v3, s7
 ; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; VI-NEXT: s_endpgm
   %vecins = insertelement <2 x double> %a, double 8.0, i32 %b
@@ -1685,18 +1682,14 @@
 ; VI-NEXT: s_mov_b32 s3, 0x1100f000
 ; VI-NEXT: s_mov_b32 s2, -1
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_cmp_eq_u32 s6, 1
-; VI-NEXT: s_cselect_b64 s[4:5], -1, 0
-; VI-NEXT: v_mov_b32_e32 v0, s11
-; VI-NEXT: v_cndmask_b32_e64 v3, v0, 0, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v0, s10
-; VI-NEXT: s_cmp_eq_u32 s6, 0
-; VI-NEXT: v_cndmask_b32_e64 v2, v0, 5, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v0, s9
-; VI-NEXT: s_cselect_b64 s[4:5], -1, 0
-; VI-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: v_cndmask_b32_e64 v0, v0, 5, s[4:5]
+; VI-NEXT: s_cmp_lg_u32 s6, 1
+; VI-NEXT: s_cselect_b64 s[4:5], s[10:11], 5
+; VI-NEXT: s_cmp_lg_u32 s6, 0
+; VI-NEXT: s_cselect_b64 s[6:7], s[8:9], 5
+; VI-NEXT: v_mov_b32_e32 v0, s6
+; VI-NEXT: v_mov_b32_e32 v1, s7
+; VI-NEXT: v_mov_b32_e32 v2, s4
+; VI-NEXT: v_mov_b32_e32 v3, s5
 ; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; VI-NEXT: s_endpgm
   %vecins = insertelement <2 x i64> %a, i64 5, i32 %b
@@ -1745,25 +1738,19 @@
 ; VI-NEXT: s_mov_b32 s3, 0x1100f000
 ; VI-NEXT: s_mov_b32 s2, -1
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v0, s11
-; VI-NEXT: s_cmp_eq_u32 s12, 1
-; VI-NEXT: s_cselect_b64 s[4:5], -1, 0
-; VI-NEXT: v_cndmask_b32_e64 v3, v0, 0, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v0, s10
-; VI-NEXT: s_cmp_eq_u32 s12, 0
-; VI-NEXT: v_cndmask_b32_e64 v2, v0, 5, s[4:5]
-; VI-NEXT: s_cselect_b64 s[4:5], -1, 0
-; VI-NEXT: v_mov_b32_e32 v0, s9
-; VI-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[4:5]
+; VI-NEXT: s_cmp_lg_u32 s12, 1
+; VI-NEXT: s_cselect_b64 s[4:5], s[10:11], 5
+; VI-NEXT: s_cmp_lg_u32 s12, 0
+; VI-NEXT: s_cselect_b64 s[8:9], s[8:9], 5
+; VI-NEXT: s_cmp_lg_u32 s12, 2
+; VI-NEXT: s_cselect_b64 s[6:7], s[6:7], 5
+; VI-NEXT: v_mov_b32_e32 v0, s6
+; VI-NEXT: v_mov_b32_e32 v1, s7
+; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 offset:16
 ; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: s_cmp_eq_u32 s12, 2
-; VI-NEXT: v_cndmask_b32_e64 v0, v0, 5, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v4, s7
-; VI-NEXT: s_cselect_b64 s[4:5], -1, 0
-; VI-NEXT: v_cndmask_b32_e64 v5, v4, 0, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v4, s6
-; VI-NEXT: v_cndmask_b32_e64 v4, v4, 5, s[4:5]
-; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0 offset:16
+; VI-NEXT: v_mov_b32_e32 v1, s9
+; VI-NEXT: v_mov_b32_e32 v2, s4
+; VI-NEXT: v_mov_b32_e32 v3, s5
 ; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; VI-NEXT: s_endpgm
   %vecins = insertelement <3 x i64> %a, i64 5, i32 %b
@@ -1811,38 +1798,32 @@
 ;
 ; VI-LABEL: dynamic_insertelement_v4f64:
 ; VI: ; %bb.0:
+; VI-NEXT: s_load_dword s16, s[4:5], 0x40
 ; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
 ; VI-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
-; VI-NEXT: s_load_dword s4, s[4:5], 0x40
-; VI-NEXT: v_mov_b32_e32 v4, 0x40200000
+; VI-NEXT: s_mov_b32 s4, 0
+; VI-NEXT: s_mov_b32 s5, 0x40200000
 ; VI-NEXT: s_mov_b32 s3, 0x1100f000
-; VI-NEXT: s_mov_b32 s2, -1
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v0, s11
-; VI-NEXT: s_cmp_eq_u32 s4, 1
-; VI-NEXT: s_cselect_b64 vcc, -1, 0
-; VI-NEXT: v_cndmask_b32_e32 v3, v0, v4, vcc
-; VI-NEXT: v_mov_b32_e32 v0, s10
-; VI-NEXT: s_cmp_eq_u32 s4, 0
-; VI-NEXT: v_cndmask_b32_e64 v2, v0, 0, vcc
-; VI-NEXT: s_cselect_b64 vcc, -1, 0
-; VI-NEXT: v_mov_b32_e32 v0, s9
-; VI-NEXT: v_cndmask_b32_e32 v1, v0, v4, vcc
+; VI-NEXT: s_cmp_lg_u32 s16, 1
+; VI-NEXT: s_cselect_b64 s[6:7], s[10:11], s[4:5]
+; VI-NEXT: s_cmp_lg_u32 s16, 0
+; VI-NEXT: s_cselect_b64 s[8:9], s[8:9], s[4:5]
+; VI-NEXT: s_cmp_lg_u32 s16, 3
+; VI-NEXT: s_cselect_b64 s[10:11], s[14:15], s[4:5]
+; VI-NEXT: s_cmp_lg_u32 s16, 2
+; VI-NEXT: s_cselect_b64 s[4:5], s[12:13], s[4:5]
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: v_mov_b32_e32 v1, s5
+; VI-NEXT: v_mov_b32_e32 v2, s10
+; VI-NEXT: v_mov_b32_e32 v3, s11
+; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; VI-NEXT: s_nop 0
 ; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: s_cmp_eq_u32 s4, 3
-; VI-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
-; VI-NEXT: s_cselect_b64 vcc, -1, 0
-; VI-NEXT: v_mov_b32_e32 v5, s15
-; VI-NEXT: v_cndmask_b32_e32 v7, v5, v4, vcc
-; VI-NEXT: v_mov_b32_e32 v5, s14
-; VI-NEXT: s_cmp_eq_u32 s4, 2
-; VI-NEXT: v_cndmask_b32_e64 v6, v5, 0, vcc
-; VI-NEXT: v_mov_b32_e32 v5, s13
-; VI-NEXT: s_cselect_b64 vcc, -1, 0
-; VI-NEXT: v_cndmask_b32_e32 v5, v5, v4, vcc
-; VI-NEXT: v_mov_b32_e32 v4, s12
-; VI-NEXT: v_cndmask_b32_e64 v4, v4, 0, vcc
-; VI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; VI-NEXT: v_mov_b32_e32 v1, s9
+; VI-NEXT: v_mov_b32_e32 v2, s6
+; VI-NEXT: v_mov_b32_e32 v3, s7
 ; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; VI-NEXT: s_endpgm
   %vecins = insertelement <4 x double> %a, double 8.0, i32 %b
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.mulo.ll b/llvm/test/CodeGen/AMDGPU/llvm.mulo.ll
--- a/llvm/test/CodeGen/AMDGPU/llvm.mulo.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.mulo.ll
@@ -344,46 +344,39 @@
 ; GFX9: ; %bb.0: ; %bb
 ; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_mul_i32 s7, s0, s3
-; GFX9-NEXT: s_mul_hi_u32 s8, s0, s2
-; GFX9-NEXT: s_mul_hi_u32 s6, s0, s3
-; GFX9-NEXT: s_add_u32 s9, s8, s7
-; GFX9-NEXT: s_mul_i32 s5, s1, s2
-; GFX9-NEXT: s_addc_u32 s6, 0, s6
-; GFX9-NEXT: s_add_u32 s9, s9, s5
+; GFX9-NEXT: s_mul_i32 s9, s0, s3
+; GFX9-NEXT: s_mul_hi_u32 s10, s0, s2
+; GFX9-NEXT: s_mul_hi_u32 s5, s0, s3
+; GFX9-NEXT: s_add_u32 s6, s10, s9
+; GFX9-NEXT: s_mul_i32 s8, s1, s2
+; GFX9-NEXT: s_addc_u32 s5, 0, s5
+; GFX9-NEXT: s_add_u32 s6, s6, s8
 ; GFX9-NEXT: s_mul_hi_u32 s4, s1, s2
-; GFX9-NEXT: s_mul_hi_i32 s10, s1, s3
-; GFX9-NEXT: s_addc_u32 s4, s6, s4
-; GFX9-NEXT: s_addc_u32 s6, s10, 0
-; GFX9-NEXT: s_mul_i32 s9, s1, s3
-; GFX9-NEXT: s_add_u32 s4, s4, s9
-; GFX9-NEXT: s_addc_u32 s6, 0, s6
-; GFX9-NEXT: s_sub_u32 s9, s4, s2
-; GFX9-NEXT: s_subb_u32 s10, s6, 0
+; GFX9-NEXT: s_mul_hi_i32 s7, s1, s3
+; GFX9-NEXT: s_addc_u32 s4, s5, s4
+; GFX9-NEXT: s_addc_u32 s5, s7, 0
+; GFX9-NEXT: s_mul_i32 s6, s1, s3
+; GFX9-NEXT: s_add_u32 s4, s4, s6
+; GFX9-NEXT: s_addc_u32 s5, 0, s5
+; GFX9-NEXT: s_sub_u32 s6, s4, s2
+; GFX9-NEXT: s_subb_u32 s7, s5, 0
 ; GFX9-NEXT: s_cmp_lt_i32 s1, 0
-; GFX9-NEXT: s_cselect_b64 vcc, -1, 0
-; GFX9-NEXT: v_mov_b32_e32 v0, s6
-; GFX9-NEXT: v_mov_b32_e32 v1, s10
-; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GFX9-NEXT: v_mov_b32_e32 v1, s4
-; GFX9-NEXT: v_mov_b32_e32 v2, s9
-; GFX9-NEXT: v_cndmask_b32_e32 v2, v1, v2, vcc
-; GFX9-NEXT: v_subrev_co_u32_e32 v3, vcc, s0, v2
-; GFX9-NEXT: v_subbrev_co_u32_e32 v1, vcc, 0, v0, vcc
+; GFX9-NEXT: s_cselect_b64 s[4:5], s[6:7], s[4:5]
+; GFX9-NEXT: s_sub_u32 s6, s4, s0
+; GFX9-NEXT: s_subb_u32 s7, s5, 0
 ; GFX9-NEXT: s_cmp_lt_i32 s3, 0
-; GFX9-NEXT: s_cselect_b64 vcc, -1, 0
-; GFX9-NEXT: s_add_i32 s1, s8, s7
-; GFX9-NEXT: s_add_i32 s1, s1, s5
-; GFX9-NEXT: s_ashr_i32 s4, s1, 31
-; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
-; GFX9-NEXT: s_mov_b32 s5, s4
-; GFX9-NEXT: s_mul_i32 s0, s0, s2
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, s[4:5], v[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v0, s0
-; GFX9-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-NEXT: v_cndmask_b32_e64 v1, v2, 0, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX9-NEXT: s_cselect_b64 s[4:5], s[6:7], s[4:5]
+; GFX9-NEXT: s_add_i32 s1, s10, s9
+; GFX9-NEXT: s_add_i32 s1, s1, s8
+; GFX9-NEXT: s_ashr_i32 s6, s1, 31
+; GFX9-NEXT: s_mov_b32 s7, s6
+; GFX9-NEXT: s_cmp_lg_u64 s[4:5], s[6:7]
+; GFX9-NEXT: s_mul_i32 s2, s0, s2
+; GFX9-NEXT: v_mov_b32_e32 v0, s1
+; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1]
 ; GFX9-NEXT: global_store_dwordx2 v[0:1], v[0:1], off
 ; GFX9-NEXT: s_endpgm
 ;
@@ -391,42 +384,37 @@
 ; GFX10: ; %bb.0: ; %bb
 ; GFX10-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
 ; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: s_mul_i32 s7, s0, s3
-; GFX10-NEXT: s_mul_hi_u32 s8, s0, s2
-; GFX10-NEXT: s_mul_hi_u32 s6, s0, s3
-; GFX10-NEXT: s_add_u32 s11, s8, s7
-; GFX10-NEXT: s_mul_i32 s5, s1, s2
-; GFX10-NEXT: s_addc_u32 s6, 0, s6
+; GFX10-NEXT: s_mul_i32 s9, s0, s3
+; GFX10-NEXT: s_mul_hi_u32 s10, s0, s2
+; GFX10-NEXT: s_mul_hi_u32 s5, s0, s3
+; GFX10-NEXT: s_add_u32 s11, s10, s9
+; GFX10-NEXT: s_mul_i32 s8, s1, s2
+; GFX10-NEXT: s_addc_u32 s5, 0, s5
 ; GFX10-NEXT: s_mul_hi_u32 s4, s1, s2
-; GFX10-NEXT: s_add_u32 s11, s11, s5
-; GFX10-NEXT: s_mul_hi_i32 s9, s1, s3
-; GFX10-NEXT: s_addc_u32 s4, s6, s4
-; GFX10-NEXT: s_mul_i32 s10, s1, s3
-; GFX10-NEXT: s_addc_u32 s6, s9, 0
-; GFX10-NEXT: s_add_u32 s4, s4, s10
-; GFX10-NEXT: s_addc_u32 s6, 0, s6
-; GFX10-NEXT: s_sub_u32 s9, s4, s2
-; GFX10-NEXT: s_subb_u32 s10, s6, 0
-; GFX10-NEXT: v_mov_b32_e32 v1, s9
+; GFX10-NEXT: s_add_u32 s11, s11, s8
+; GFX10-NEXT: s_mul_hi_i32 s6, s1, s3
+; GFX10-NEXT: s_addc_u32 s4, s5, s4
+; GFX10-NEXT: s_mul_i32 s7, s1, s3
+; GFX10-NEXT: s_addc_u32 s5, s6, 0
+; GFX10-NEXT: s_add_u32 s4, s4, s7
+; GFX10-NEXT: s_addc_u32 s5, 0, s5
+; GFX10-NEXT: s_sub_u32 s6, s4, s2
+; GFX10-NEXT: s_subb_u32 s7, s5, 0
 ; GFX10-NEXT: s_cmp_lt_i32 s1, 0
-; GFX10-NEXT: v_mov_b32_e32 v0, s10
-; GFX10-NEXT: s_cselect_b32 vcc_lo, -1, 0
+; GFX10-NEXT: s_cselect_b64 s[4:5], s[6:7], s[4:5]
+; GFX10-NEXT: s_sub_u32 s6, s4, s0
+; GFX10-NEXT: s_subb_u32 s7, s5, 0
 ; GFX10-NEXT: s_cmp_lt_i32 s3, 0
-; GFX10-NEXT: v_cndmask_b32_e32 v2, s4, v1, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e32 v0, s6, v0, vcc_lo
-; GFX10-NEXT: v_sub_co_u32 v3, vcc_lo, v2, s0
 ; GFX10-NEXT: s_mul_i32 s0, s0, s2
-; GFX10-NEXT: v_subrev_co_ci_u32_e32 v1, vcc_lo, 0, v0, vcc_lo
-; GFX10-NEXT: s_cselect_b32 vcc_lo, -1, 0
-; GFX10-NEXT: s_add_i32 s1, s8, s7
-; GFX10-NEXT: s_add_i32 s1, s1, s5
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
-; GFX10-NEXT: s_ashr_i32 s4, s1, 31
-; GFX10-NEXT: s_mov_b32 s5, s4
-; GFX10-NEXT: v_cmp_ne_u64_e32 vcc_lo, s[4:5], v[0:1]
-; GFX10-NEXT: v_cndmask_b32_e64 v1, s1, 0, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v0, s0, 0, vcc_lo
+; GFX10-NEXT: s_cselect_b64 s[4:5], s[6:7], s[4:5]
+; GFX10-NEXT: s_add_i32 s1, s10, s9
+; GFX10-NEXT: s_add_i32 s1, s1, s8
+; GFX10-NEXT: s_ashr_i32 s6, s1, 31
+; GFX10-NEXT: s_mov_b32 s7, s6
+; GFX10-NEXT: s_cmp_lg_u64 s[4:5], s[6:7]
+; GFX10-NEXT: s_cselect_b32 s2, -1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, s1, 0, s2
+; GFX10-NEXT: v_cndmask_b32_e64 v0, s0, 0, s2
 ; GFX10-NEXT: global_store_dwordx2 v[0:1], v[0:1], off
 ; GFX10-NEXT: s_endpgm
 bb:
diff --git a/llvm/test/CodeGen/AMDGPU/load-select-ptr.ll b/llvm/test/CodeGen/AMDGPU/load-select-ptr.ll
--- a/llvm/test/CodeGen/AMDGPU/load-select-ptr.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-select-ptr.ll
@@ -9,8 +9,7 @@
 ; GCN: s_load_dwordx2
 ; GCN: s_cmp_eq_u32
-; GCN: v_cndmask_b32
-; GCN: v_cndmask_b32
+; GCN: s_cselect_b64

 ; GCN-NOT: load_dword
 ; GCN: flat_load_dwordx2
@@ -35,8 +34,7 @@
 ; GCN: s_load_dwordx2
 ; GCN: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x0{{$}}
 ; GCN: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x0{{$}}
-; GCN: v_cndmask_b32
-; GCN: v_cndmask_b32
+; GCN: s_cselect_b64
 ; GCN: flat_store_dwordx2
 define amdgpu_kernel void @select_ptr_crash_i64_global(i32 %tmp, [8 x i32], i64 addrspace(1)* %ptr0, [8 x i32], i64 addrspace(1)* %ptr1, [8 x i32], i64 addrspace(1)* %ptr2) {
   %tmp2 = icmp eq i32 %tmp, 0
diff --git a/llvm/test/CodeGen/AMDGPU/select64.ll b/llvm/test/CodeGen/AMDGPU/select64.ll
--- a/llvm/test/CodeGen/AMDGPU/select64.ll
+++ b/llvm/test/CodeGen/AMDGPU/select64.ll
@@ -5,8 +5,10 @@
 ; i64 select should be split into two i32 selects, and we shouldn't need
 ; to use a shift to extract the hi dword of the input.
 ; GCN-NOT: s_lshr_b64
-; GCN: v_cndmask
-; GCN: v_cndmask
+; SI: v_cndmask
+; SI: v_cndmask
+; VI: s_cmp_lt_u32
+; VI: s_cselect_b64
 define amdgpu_kernel void @select0(i64 addrspace(1)* %out, i32 %cond, i64 %in) {
 entry:
   %0 = icmp ugt i32 %cond, 5
@@ -57,8 +59,11 @@
 }

 ; GCN-LABEL: {{^}}v_select_i64_split_imm:
-; GCN-DAG: v_cndmask_b32_e32 {{v[0-9]+}}, 0, {{v[0-9]+}}
-; GCN-DAG: v_cndmask_b32_e32 {{v[0-9]+}}, 63, {{v[0-9]+}}
+; SI-DAG: v_cndmask_b32_e32 {{v[0-9]+}}, 0, {{v[0-9]+}}
+; SI-DAG: v_cndmask_b32_e32 {{v[0-9]+}}, 63, {{v[0-9]+}}
+; VI-DAG: s_mov_b32 s[[LO:[0-9]+]], 0
+; VI-DAG: s_mov_b32 s[[HI:[0-9]+]], 63
+; VI-DAG: s_cselect_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[LO]]:[[HI]]{{\]}}
 ; GCN: s_endpgm
 define amdgpu_kernel void @v_select_i64_split_imm(i64 addrspace(1)* %out, i32 %cond, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
   %cmp = icmp ugt i32 %cond, 5
diff --git a/llvm/test/CodeGen/AMDGPU/selectcc.ll b/llvm/test/CodeGen/AMDGPU/selectcc.ll
--- a/llvm/test/CodeGen/AMDGPU/selectcc.ll
+++ b/llvm/test/CodeGen/AMDGPU/selectcc.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -verify-machineinstrs -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefixes=GCN,SI -check-prefix=FUNC %s
-; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefixes=GCN,VI -check-prefix=FUNC %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=VI -check-prefix=FUNC %s

 ; FUNC-LABEL: {{^}}selectcc_i64:
 ; EG: XOR_INT
@@ -9,9 +9,10 @@
 ; EG: CNDE_INT
 ; EG: CNDE_INT
 ; SI: v_cmp_eq_u64
+; SI: v_cndmask
+; SI: v_cndmask
 ; VI: s_cmp_eq_u64
-; GCN: v_cndmask
-; GCN: v_cndmask
+; VI: s_cselect_b64
 define amdgpu_kernel void @selectcc_i64(i64 addrspace(1) * %out, i64 %lhs, i64 %rhs, i64 %true, i64 %false) {
 entry:
   %0 = icmp eq i64 %lhs, %rhs
diff --git a/llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll b/llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
--- a/llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
@@ -16,10 +16,10 @@
 ; GCN-LABEL: {{^}}sint_to_fp_i1_f64:
 ; VI-DAG: s_cmp_eq_u32
-; VI-DAG: s_cselect_b32 s[[SSEL:[0-9]+]], 0xbff00000, 0
-; VI-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
-; VI-DAG: v_mov_b32_e32 v[[SEL:[0-9]+]], s[[SSEL]]
-; VI: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[ZERO]]:[[SEL]]{{\]}}
+; VI-DAG: s_cselect_b64 s{{\[}}[[S_LO:[0-9]+]]:[[S_HI:[0-9]+]]{{\]}}, -1.0, 0
+; VI-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[S_LO]]
+; VI-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[S_HI]]
+; VI: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[V_LO]]:[[V_HI]]{{\]}}
 ; VI: s_endpgm

 ; SI-DAG: s_cmp_eq_u32
diff --git a/llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll b/llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
--- a/llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
@@ -76,13 +76,15 @@
 ; GCN-LABEL: {{^}}uint_to_fp_i1_to_f64:
 ; VI-DAG: s_cmp_eq_u32
-; VI-DAG: s_cselect_b32 s[[SSEL:[0-9]+]], 0x3ff00000, 0
-; VI-DAG: v_mov_b32_e32 v[[SEL:[0-9]+]], s[[SSEL]]
+; VI-DAG: s_cselect_b64 s{{\[}}[[S_LO:[0-9]+]]:[[S_HI:[0-9]+]]{{\]}}, 1.0, 0
+; VI-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[S_LO]]
+; VI-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[S_HI]]
+; VI: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[V_LO]]:[[V_HI]]{{\]}}

 ; SI-DAG: s_cmp_eq_u32
 ; SI-DAG: s_cselect_b64 vcc, -1, 0
 ; SI-DAG: v_cndmask_b32_e32 v[[SEL:[0-9]+]], 0, {{v[0-9]+}}, vcc
-; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
-; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[ZERO]]:[[SEL]]{{\]}}
+; SI-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
+; SI: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[ZERO]]:[[SEL]]{{\]}}
 ; GCN: s_endpgm
 define amdgpu_kernel void @uint_to_fp_i1_to_f64(double addrspace(1)* %out, i32 %in) {
   %cmp = icmp eq i32 %in, 0