diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h
--- a/llvm/lib/Target/AMDGPU/AMDGPU.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.h
@@ -347,6 +347,9 @@
 FunctionPass *createAMDGPUSetWavePriorityPass();
 void initializeAMDGPUSetWavePriorityPass(PassRegistry &);
 
+void initializeGCNRewritePartialRegUsesPass(llvm::PassRegistry &);
+extern char &GCNRewritePartialRegUsesID;
+
 namespace AMDGPU {
 enum TargetIndex {
   TI_CONSTDATA_START,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -408,6 +408,7 @@
   initializeAMDGPUResourceUsageAnalysisPass(*PR);
   initializeGCNNSAReassignPass(*PR);
   initializeGCNPreRAOptimizationsPass(*PR);
+  initializeGCNRewritePartialRegUsesPass(*PR);
 }
 
 static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
@@ -1247,6 +1248,8 @@
   if (OptExecMaskPreRA)
     insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);
 
+  insertPass(&RenameIndependentSubregsID, &GCNRewritePartialRegUsesID);
+
   if (isPassEnabled(EnablePreRAOptimizations))
     insertPass(&RenameIndependentSubregsID, &GCNPreRAOptimizationsID);
 
diff --git a/llvm/lib/Target/AMDGPU/CMakeLists.txt b/llvm/lib/Target/AMDGPU/CMakeLists.txt
--- a/llvm/lib/Target/AMDGPU/CMakeLists.txt
+++ b/llvm/lib/Target/AMDGPU/CMakeLists.txt
@@ -110,6 +110,7 @@
   GCNNSAReassign.cpp
   GCNPreRAOptimizations.cpp
   GCNRegPressure.cpp
+  GCNRewritePartialRegUses.cpp
   GCNSchedStrategy.cpp
   GCNVOPDUtils.cpp
   R600AsmPrinter.cpp
diff --git a/llvm/lib/Target/AMDGPU/GCNRewritePartialRegUses.cpp b/llvm/lib/Target/AMDGPU/GCNRewritePartialRegUses.cpp
new file
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/GCNRewritePartialRegUses.cpp
@@ -0,0 +1,330 @@
+//===-------------- GCNRewritePartialRegUses.cpp --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// The RenameIndependentSubregs pass leaves large, only partially used super
+/// registers, for example:
+///   undef %0.sub4:VReg_1024 = ...
+///   %0.sub5:VReg_1024 = ...
+///   %0.sub6:VReg_1024 = ...
+///   %0.sub7:VReg_1024 = ...
+///   use %0.sub4_sub5_sub6_sub7
+///   use %0.sub6_sub7
+///
+/// GCNRewritePartialRegUses runs right after RenameIndependentSubregs and
+/// rewrites such partially used super registers with registers of minimal
+/// size:
+///   undef %0.sub0:VReg_128 = ...
+///   %0.sub1:VReg_128 = ...
+///   %0.sub2:VReg_128 = ...
+///   %0.sub3:VReg_128 = ...
+///   use %0.sub0_sub1_sub2_sub3
+///   use %0.sub2_sub3
+///
+/// This allows the register pressure calculation to avoid tracking subreg
+/// lanemasks and creates more opportunities for code that is unaware of
+/// lanemasks.
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "rewrite-partial-reg-uses"
+
+namespace {
+
+class GCNRewritePartialRegUses : public MachineFunctionPass {
+public:
+  static char ID;
+  GCNRewritePartialRegUses() : MachineFunctionPass(ID) {}
+
+  StringRef getPassName() const override {
+    return "Rewrite Partial Register Uses";
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    AU.addPreserved<LiveIntervals>();
+    AU.addPreserved<SlotIndexes>();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+private:
+  MachineRegisterInfo *MRI;
+  const TargetRegisterInfo *TRI;
+  const TargetInstrInfo *TII;
+  LiveIntervals *LIS;
+
+  mutable SmallDenseMap<std::pair<unsigned, unsigned>, unsigned> SubRegs;
+
+  mutable SmallDenseMap<std::pair<const TargetRegisterClass *, unsigned>,
+                        const uint32_t *>
+      SuperRegMasks;
+
+  bool rewriteReg(Register Reg);
+
+  struct RegClassSubReg {
+    const TargetRegisterClass *RC;
+    unsigned SubReg = 0;
+    RegClassSubReg(const TargetRegisterClass *RC_ = nullptr) : RC(RC_) {}
+  };
+
+  typedef SmallDenseMap<unsigned, RegClassSubReg> SubRegMap;
+
+  const TargetRegisterClass *getMinSizeReg(Register Reg, SubRegMap &SubRegs);
+
+  const TargetRegisterClass *
+  getRegClassWithShiftedSubregs(const TargetRegisterClass *RC, unsigned RShift,
+                                unsigned CoverSubregIdx, SubRegMap &SubRegs);
+
+  const TargetRegisterClass *getOperandRegClass(MachineOperand &MO) const;
+
+  unsigned getSubReg(unsigned Offset, unsigned Size) const;
+
+  unsigned shiftSubReg(unsigned SubReg, unsigned RShift) const;
+
+  const uint32_t *getSuperRegClassMask(const TargetRegisterClass *RC,
+                                       unsigned SubRegIdx) const;
+};
+
+} // end anonymous namespace
+
+unsigned GCNRewritePartialRegUses::getSubReg(unsigned Offset,
+                                             unsigned Size) const {
+  auto R = SubRegs.try_emplace({Offset, Size}, 0);
+  if (R.second) {
+    for (unsigned Idx = 1, E = TRI->getNumSubRegIndices(); Idx < E; ++Idx) {
+      if (TRI->getSubRegIdxOffset(Idx) == Offset &&
+          TRI->getSubRegIdxSize(Idx) == Size) {
+        R.first->second = Idx;
+        break;
+      }
+    }
+  }
+  return R.first->second;
+}
+
+// Find the subreg index with the same size as SubReg but with its offset
+// shifted right by RShift bits, if such an index exists.
+unsigned GCNRewritePartialRegUses::shiftSubReg(unsigned SubReg,
+                                               unsigned RShift) const {
+  unsigned Offset = TRI->getSubRegIdxOffset(SubReg) - RShift;
+  return getSubReg(Offset, TRI->getSubRegIdxSize(SubReg));
+}
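+
+// Editorial note, not part of the original patch: an illustrative walk through
+// the lookups above, assuming the standard AMDGPU subreg indices where each
+// subN is 32 bits wide at bit offset N*32:
+//
+//   getSubReg(/*Offset=*/64, /*Size=*/32)            -> sub2
+//   getSubReg(/*Offset=*/0,  /*Size=*/128)           -> sub0_sub1_sub2_sub3
+//   shiftSubReg(sub4_sub5_sub6_sub7, /*RShift=*/128) -> sub0_sub1_sub2_sub3
+//
+// i.e. shifting an index right by RShift means looking up the index with the
+// same size whose offset is RShift bits smaller; if no such index exists the
+// lookup returns 0.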
+
+// Return a bit mask containing all register classes that are projected into
+// RC by SubRegIdx, i.e. RC's super register classes with respect to SubRegIdx.
+const uint32_t *
+GCNRewritePartialRegUses::getSuperRegClassMask(const TargetRegisterClass *RC,
+                                               unsigned SubRegIdx) const {
+  auto R = SuperRegMasks.try_emplace({RC, SubRegIdx}, nullptr);
+  if (R.second) {
+    for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI) {
+      if (RCI.getSubReg() == SubRegIdx) {
+        R.first->second = RCI.getMask();
+        break;
+      }
+    }
+  }
+  return R.first->second;
+}
+
+// For a register of class RC whose used subregs are the keys of SubRegs, try
+// to find a register class with registers of minimal size by shifting the
+// subreg offsets right by RShift bits. On success return the resulting
+// register class and store the shifted subreg indexes as the values of
+// SubRegs. If CoverSubregIdx is non-zero it names the subreg that covers all
+// other subregs; that subreg becomes a whole-register access in the new class.
+const TargetRegisterClass *
+GCNRewritePartialRegUses::getRegClassWithShiftedSubregs(
+    const TargetRegisterClass *RC, unsigned RShift, unsigned CoverSubregIdx,
+    SubRegMap &SubRegs) {
+  LLVM_DEBUG(dbgs() << "  Try shift " << RShift << '\n');
+  BitVector ClassMask(TRI->getNumRegClasses(), true /* all bits set to 1 */);
+  for (auto &P : SubRegs) {
+    unsigned OldSubReg = P.first;
+    unsigned &NewSubReg = P.second.SubReg;
+    auto *&SubRegRC = P.second.RC;
+
+    // An instruction operand may not specify the required register class
+    // (e.g. COPY).
+    if (!SubRegRC)
+      SubRegRC = TRI->getSubRegisterClass(RC, OldSubReg);
+
+    if (!SubRegRC)
+      return nullptr;
+
+    LLVM_DEBUG(dbgs() << "  " << TRI->getSubRegIndexName(OldSubReg) << ':'
+                      << TRI->getRegClassName(SubRegRC)
+                      << (SubRegRC->isAllocatable() ? "" : " not alloc")
+                      << " -> ");
+
+    if (OldSubReg == CoverSubregIdx) {
+      NewSubReg = 0;
+      LLVM_DEBUG(dbgs() << "whole reg\n");
+    } else {
+      NewSubReg = shiftSubReg(OldSubReg, RShift);
+      if (!NewSubReg) {
+        LLVM_DEBUG(dbgs() << "none\n");
+        return nullptr;
+      }
+      LLVM_DEBUG(dbgs() << TRI->getSubRegIndexName(NewSubReg) << '\n');
+    }
+
+    const uint32_t *Mask = NewSubReg ? getSuperRegClassMask(SubRegRC, NewSubReg)
+                                     : SubRegRC->getSubClassMask();
+    if (!Mask)
+      return nullptr;
+
+    ClassMask.clearBitsNotInMask(Mask);
+  }
+
+  const TargetRegisterClass *MinRC = nullptr;
+  unsigned MinNumBits = std::numeric_limits<unsigned>::max();
+  for (unsigned ClassID : ClassMask.set_bits()) {
+    auto *RC = TRI->getRegClass(ClassID);
+    if (!RC->isAllocatable())
+      continue;
+
+    unsigned NumBits = TRI->getRegSizeInBits(*RC);
+#ifdef DUMP_ALL_SUPERREG_CLASSES
+    LLVM_DEBUG(dbgs() << "  " << NumBits << ' ' << TRI->getRegClassName(RC)
+                      << '\n');
+#endif
+    if (NumBits < MinNumBits) {
+      MinNumBits = NumBits;
+      MinRC = RC;
+    }
+  }
+#ifndef NDEBUG
+  if (MinRC) {
+    for (auto &P : SubRegs)
+      assert(MinRC == TRI->getSubClassWithSubReg(MinRC, P.second.SubReg));
+  }
+#endif
+  return MinRC;
+}
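+
+// Editorial note, not part of the original patch: applied to the example in
+// the file header comment, the used indexes are sub4..sub7, sub6_sub7 and
+// sub4_sub5_sub6_sub7, which span bits [128, 256). getMinSizeReg below
+// therefore tries RShift = 128 with sub4_sub5_sub6_sub7 as the covering
+// index: sub4..sub7 shift to sub0..sub3, sub6_sub7 shifts to sub2_sub3, and
+// the covering index maps to the whole new register. The smallest allocatable
+// class compatible with all shifted indexes is VReg_128, so the VReg_1024
+// register is rewritten as a VReg_128 register.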
+
+// Given register Reg and its used subreg indexes as the keys of the SubRegs
+// map, return a register class of minimal size that can hold all the used
+// parts, and store the new subreg indexes as the values of SubRegs.
+const TargetRegisterClass *
+GCNRewritePartialRegUses::getMinSizeReg(Register Reg, SubRegMap &SubRegs) {
+
+  unsigned MinBit = std::numeric_limits<unsigned>::max();
+  unsigned MaxBit = 0;
+  unsigned CoverSubregIdx = 0;
+  for (auto &P : SubRegs) {
+    unsigned Start = TRI->getSubRegIdxOffset(P.first);
+    unsigned End = Start + TRI->getSubRegIdxSize(P.first);
+    if (Start < MinBit) {
+      MinBit = Start;
+      CoverSubregIdx = 0;
+    }
+    if (End > MaxBit) {
+      MaxBit = End;
+      CoverSubregIdx = 0;
+    }
+    if (Start == MinBit && End == MaxBit)
+      CoverSubregIdx = P.first;
+  }
+
+  auto *RC = MRI->getRegClass(Reg);
+  if (auto *NewRC =
+          getRegClassWithShiftedSubregs(RC, MinBit, CoverSubregIdx, SubRegs))
+    return NewRC;
+
+  // TODO: this may require trying other offsets if, for example, we got a
+  // 16-bit subreg at the lowest offset followed by a 32-bit subreg.
+
+  return nullptr;
+}
+
+const TargetRegisterClass *
+GCNRewritePartialRegUses::getOperandRegClass(MachineOperand &MO) const {
+  MachineInstr *MI = MO.getParent();
+  return TII->getRegClass(TII->get(MI->getOpcode()), MI->getOperandNo(&MO),
+                          TRI, *MI->getParent()->getParent());
+}
+
+bool GCNRewritePartialRegUses::rewriteReg(Register Reg) {
+  auto Range = MRI->reg_nodbg_operands(Reg);
+  if (Range.begin() == Range.end())
+    return false;
+
+  for (MachineOperand &MO : Range) {
+    if (MO.getSubReg() == 0) // Whole reg used, nothing to do.
+      return false;
+  }
+
+  // Collect used subregs and their constrained reg classes inferred from
+  // instruction operands.
+  SubRegMap SubRegs;
+  for (MachineOperand &MO : MRI->reg_nodbg_operands(Reg)) {
+    assert(MO.getSubReg() != 0);
+    auto *OpRC = getOperandRegClass(MO);
+    auto R = SubRegs.try_emplace(MO.getSubReg(), OpRC);
+    if (!R.second) {
+      auto &V = R.first->second;
+      V.RC = TRI->getCommonSubClass(V.RC, OpRC);
+    }
+  }
+
+  LLVM_DEBUG(dbgs() << "Rewrite partial reg " << printReg(Reg, TRI) << ':'
+                    << TRI->getRegClassName(MRI->getRegClass(Reg)) << '\n');
+
+  auto *NewRC = getMinSizeReg(Reg, SubRegs);
+  if (!NewRC)
+    return false;
+
+  Register NewReg = MRI->createVirtualRegister(NewRC);
+  LLVM_DEBUG(dbgs() << "  " << printReg(Reg, TRI) << ':'
+                    << TRI->getRegClassName(MRI->getRegClass(Reg)) << " -> "
+                    << printReg(NewReg, TRI) << ':'
+                    << TRI->getRegClassName(NewRC) << '\n');
+
+  for (auto &MO : make_early_inc_range(MRI->reg_nodbg_operands(Reg))) {
+    MO.setReg(NewReg);
+    unsigned SubReg = SubRegs[MO.getSubReg()].SubReg;
+    MO.setSubReg(SubReg);
+    if (SubReg == 0 && MO.isDef())
+      MO.setIsUndef(false);
+  }
+  if (LIS) {
+    LIS->removeInterval(Reg);
+    LIS->createAndComputeVirtRegInterval(NewReg);
+  }
+  return true;
+}
+
+bool GCNRewritePartialRegUses::runOnMachineFunction(MachineFunction &MF) {
+  MRI = &MF.getRegInfo();
+  TRI = MRI->getTargetRegisterInfo();
+  TII = MF.getSubtarget().getInstrInfo();
+  LIS = getAnalysisIfAvailable<LiveIntervals>();
+  bool Changed = false;
+  for (size_t I = 0, E = MRI->getNumVirtRegs(); I < E; ++I) {
+    Changed |= rewriteReg(Register::index2VirtReg(I));
+  }
+  return Changed;
+}
+
+char GCNRewritePartialRegUses::ID;
+
+char &llvm::GCNRewritePartialRegUsesID = GCNRewritePartialRegUses::ID;
+
+INITIALIZE_PASS_BEGIN(GCNRewritePartialRegUses, DEBUG_TYPE,
+                      "Rewrite Partial Register Uses", false, false)
+INITIALIZE_PASS_END(GCNRewritePartialRegUses, DEBUG_TYPE,
+                    "Rewrite Partial Register Uses", false, false)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement-stack-lower.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement-stack-lower.ll
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement-stack-lower.ll
+++
b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement-stack-lower.ll @@ -11,14 +11,13 @@ ; GCN-NEXT: s_mov_b32 s4, s33 ; GCN-NEXT: s_add_i32 s33, s32, 0x3fc0 ; GCN-NEXT: s_and_b32 s33, s33, 0xffffc000 -; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:60 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:56 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v42, off, s[0:3], s33 offset:52 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:48 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v44, off, s[0:3], s33 offset:44 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v45, off, s[0:3], s33 offset:40 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v46, off, s[0:3], s33 offset:36 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v47, off, s[0:3], s33 offset:32 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:56 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:52 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v42, off, s[0:3], s33 offset:48 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:44 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v44, off, s[0:3], s33 offset:40 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v45, off, s[0:3], s33 offset:36 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v46, off, s[0:3], s33 offset:32 ; 4-byte Folded Spill ; GCN-NEXT: buffer_store_dword v56, off, s[0:3], s33 offset:28 ; 4-byte Folded Spill ; GCN-NEXT: buffer_store_dword v57, off, s[0:3], s33 offset:24 ; 4-byte Folded Spill ; GCN-NEXT: buffer_store_dword v58, off, s[0:3], s33 offset:20 ; 4-byte Folded Spill @@ -27,133 +26,115 @@ ; GCN-NEXT: buffer_store_dword v61, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill ; GCN-NEXT: buffer_store_dword v62, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill ; GCN-NEXT: buffer_store_dword v63, off, s[0:3], s33 ; 4-byte Folded Spill -; GCN-NEXT: v_mov_b32_e32 v6, v2 -; GCN-NEXT: global_load_dwordx4 v[2:5], v[0:1], off -; GCN-NEXT: global_load_dwordx4 v[16:19], v[0:1], off offset:16 -; GCN-NEXT: global_load_dwordx4 v[56:59], v[0:1], off offset:32 -; GCN-NEXT: global_load_dwordx4 v[48:51], v[0:1], off offset:48 -; GCN-NEXT: global_load_dwordx4 v[20:23], v[0:1], off offset:64 -; GCN-NEXT: global_load_dwordx4 v[44:47], v[0:1], off offset:80 -; GCN-NEXT: global_load_dwordx4 v[40:43], v[0:1], off offset:96 -; GCN-NEXT: global_load_dwordx4 v[60:63], v[0:1], off offset:112 -; GCN-NEXT: global_load_dwordx4 v[36:39], v[0:1], off offset:128 -; GCN-NEXT: global_load_dwordx4 v[32:35], v[0:1], off offset:144 -; GCN-NEXT: global_load_dwordx4 v[28:31], v[0:1], off offset:160 -; GCN-NEXT: global_load_dwordx4 v[52:55], v[0:1], off offset:176 -; GCN-NEXT: global_load_dwordx4 v[24:27], v[0:1], off offset:192 -; GCN-NEXT: global_load_dwordx4 v[7:10], v[0:1], off offset:208 +; GCN-NEXT: global_load_dwordx4 v[3:6], v[0:1], off +; GCN-NEXT: global_load_dwordx4 v[7:10], v[0:1], off offset:16 +; GCN-NEXT: global_load_dwordx4 v[11:14], v[0:1], off offset:32 +; GCN-NEXT: global_load_dwordx4 v[15:18], v[0:1], off offset:48 +; GCN-NEXT: global_load_dwordx4 v[19:22], v[0:1], off offset:64 +; GCN-NEXT: global_load_dwordx4 v[23:26], v[0:1], off offset:80 +; GCN-NEXT: global_load_dwordx4 v[27:30], v[0:1], off offset:96 +; GCN-NEXT: global_load_dwordx4 v[31:34], v[0:1], off offset:112 +; GCN-NEXT: global_load_dwordx4 v[35:38], v[0:1], off offset:128 +; GCN-NEXT: global_load_dwordx4 
v[48:51], v[0:1], off offset:144 +; GCN-NEXT: global_load_dwordx4 v[52:55], v[0:1], off offset:160 +; GCN-NEXT: global_load_dwordx4 v[39:42], v[0:1], off offset:176 +; GCN-NEXT: global_load_dwordx4 v[43:46], v[0:1], off offset:192 +; GCN-NEXT: global_load_dwordx4 v[56:59], v[0:1], off offset:208 +; GCN-NEXT: global_load_dwordx4 v[60:63], v[0:1], off offset:224 ; GCN-NEXT: s_add_i32 s32, s32, 0x10000 ; GCN-NEXT: s_add_i32 s32, s32, 0xffff0000 ; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:512 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v60, off, s[0:3], s33 offset:528 ; 4-byte Folded Spill ; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: buffer_store_dword v4, off, s[0:3], s33 offset:516 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v5, off, s[0:3], s33 offset:520 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v6, off, s[0:3], s33 offset:524 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v7, off, s[0:3], s33 offset:528 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v8, off, s[0:3], s33 offset:532 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v9, off, s[0:3], s33 offset:536 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v10, off, s[0:3], s33 offset:540 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v11, off, s[0:3], s33 offset:544 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v12, off, s[0:3], s33 offset:548 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v13, off, s[0:3], s33 offset:552 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v14, off, s[0:3], s33 offset:556 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v15, off, s[0:3], s33 offset:560 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v16, off, s[0:3], s33 offset:564 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v17, off, s[0:3], s33 offset:568 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v18, off, s[0:3], s33 offset:572 ; 4-byte Folded Spill -; GCN-NEXT: global_load_dwordx4 v[8:11], v[0:1], off offset:224 -; GCN-NEXT: global_load_dwordx4 v[12:15], v[0:1], off offset:240 +; GCN-NEXT: buffer_store_dword v61, off, s[0:3], s33 offset:532 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v62, off, s[0:3], s33 offset:536 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v63, off, s[0:3], s33 offset:540 ; 4-byte Folded Spill +; GCN-NEXT: global_load_dwordx4 v[60:63], v[0:1], off offset:240 +; GCN-NEXT: v_and_b32_e32 v0, 63, v2 ; GCN-NEXT: v_lshrrev_b32_e64 v1, 6, s33 -; GCN-NEXT: v_add_u32_e32 v1, 0x100, v1 -; GCN-NEXT: buffer_store_dword v2, off, s[0:3], s33 offset:256 -; GCN-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:260 -; GCN-NEXT: buffer_store_dword v4, off, s[0:3], s33 offset:264 -; GCN-NEXT: buffer_store_dword v5, off, s[0:3], s33 offset:268 -; GCN-NEXT: buffer_store_dword v16, off, s[0:3], s33 offset:272 -; GCN-NEXT: buffer_store_dword v17, off, s[0:3], s33 offset:276 -; GCN-NEXT: buffer_store_dword v18, off, s[0:3], s33 offset:280 -; GCN-NEXT: buffer_store_dword v19, off, s[0:3], s33 offset:284 -; GCN-NEXT: buffer_store_dword v56, off, s[0:3], s33 offset:288 -; GCN-NEXT: buffer_store_dword v57, off, s[0:3], s33 offset:292 -; GCN-NEXT: buffer_store_dword v58, off, s[0:3], s33 offset:296 -; GCN-NEXT: buffer_store_dword v59, off, s[0:3], s33 offset:300 -; GCN-NEXT: buffer_store_dword v48, off, s[0:3], s33 offset:304 -; GCN-NEXT: buffer_store_dword v49, off, s[0:3], s33 offset:308 -; GCN-NEXT: buffer_store_dword v50, off, s[0:3], s33 offset:312 -; GCN-NEXT: 
buffer_store_dword v51, off, s[0:3], s33 offset:316 -; GCN-NEXT: buffer_store_dword v20, off, s[0:3], s33 offset:320 -; GCN-NEXT: buffer_store_dword v21, off, s[0:3], s33 offset:324 -; GCN-NEXT: buffer_store_dword v22, off, s[0:3], s33 offset:328 -; GCN-NEXT: buffer_store_dword v23, off, s[0:3], s33 offset:332 -; GCN-NEXT: buffer_store_dword v44, off, s[0:3], s33 offset:336 -; GCN-NEXT: buffer_store_dword v45, off, s[0:3], s33 offset:340 -; GCN-NEXT: buffer_store_dword v46, off, s[0:3], s33 offset:344 -; GCN-NEXT: buffer_store_dword v47, off, s[0:3], s33 offset:348 -; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:352 -; GCN-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:356 -; GCN-NEXT: buffer_store_dword v42, off, s[0:3], s33 offset:360 -; GCN-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:364 -; GCN-NEXT: buffer_store_dword v60, off, s[0:3], s33 offset:368 -; GCN-NEXT: buffer_store_dword v61, off, s[0:3], s33 offset:372 -; GCN-NEXT: buffer_store_dword v62, off, s[0:3], s33 offset:376 -; GCN-NEXT: buffer_store_dword v63, off, s[0:3], s33 offset:380 -; GCN-NEXT: buffer_store_dword v36, off, s[0:3], s33 offset:384 -; GCN-NEXT: buffer_store_dword v37, off, s[0:3], s33 offset:388 -; GCN-NEXT: buffer_store_dword v38, off, s[0:3], s33 offset:392 -; GCN-NEXT: buffer_store_dword v39, off, s[0:3], s33 offset:396 -; GCN-NEXT: buffer_store_dword v32, off, s[0:3], s33 offset:400 -; GCN-NEXT: buffer_store_dword v33, off, s[0:3], s33 offset:404 -; GCN-NEXT: buffer_store_dword v34, off, s[0:3], s33 offset:408 -; GCN-NEXT: buffer_store_dword v35, off, s[0:3], s33 offset:412 -; GCN-NEXT: buffer_store_dword v28, off, s[0:3], s33 offset:416 -; GCN-NEXT: buffer_store_dword v29, off, s[0:3], s33 offset:420 -; GCN-NEXT: buffer_store_dword v30, off, s[0:3], s33 offset:424 -; GCN-NEXT: buffer_store_dword v31, off, s[0:3], s33 offset:428 -; GCN-NEXT: buffer_store_dword v52, off, s[0:3], s33 offset:432 -; GCN-NEXT: buffer_store_dword v53, off, s[0:3], s33 offset:436 -; GCN-NEXT: buffer_store_dword v54, off, s[0:3], s33 offset:440 -; GCN-NEXT: buffer_store_dword v55, off, s[0:3], s33 offset:444 -; GCN-NEXT: buffer_store_dword v24, off, s[0:3], s33 offset:448 -; GCN-NEXT: buffer_store_dword v25, off, s[0:3], s33 offset:452 -; GCN-NEXT: buffer_store_dword v26, off, s[0:3], s33 offset:456 -; GCN-NEXT: buffer_store_dword v27, off, s[0:3], s33 offset:460 -; GCN-NEXT: buffer_load_dword v16, off, s[0:3], s33 offset:512 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v17, off, s[0:3], s33 offset:516 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v18, off, s[0:3], s33 offset:520 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v19, off, s[0:3], s33 offset:524 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v20, off, s[0:3], s33 offset:528 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v21, off, s[0:3], s33 offset:532 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v22, off, s[0:3], s33 offset:536 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v23, off, s[0:3], s33 offset:540 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v24, off, s[0:3], s33 offset:544 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v25, off, s[0:3], s33 offset:548 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v26, off, s[0:3], s33 offset:552 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v27, off, s[0:3], s33 offset:556 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v28, off, s[0:3], s33 offset:560 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword 
v29, off, s[0:3], s33 offset:564 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v30, off, s[0:3], s33 offset:568 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s33 offset:572 ; 4-byte Folded Reload -; GCN-NEXT: v_and_b32_e32 v0, 63, v6 ; GCN-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GCN-NEXT: v_add_u32_e32 v1, 0x100, v1 ; GCN-NEXT: v_add_u32_e32 v0, v1, v0 ; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: v_mov_b32_e32 v16, v20 -; GCN-NEXT: v_mov_b32_e32 v17, v21 -; GCN-NEXT: v_mov_b32_e32 v18, v22 -; GCN-NEXT: v_mov_b32_e32 v19, v23 -; GCN-NEXT: buffer_store_dword v16, off, s[0:3], s33 offset:464 -; GCN-NEXT: buffer_store_dword v17, off, s[0:3], s33 offset:468 -; GCN-NEXT: buffer_store_dword v18, off, s[0:3], s33 offset:472 -; GCN-NEXT: buffer_store_dword v19, off, s[0:3], s33 offset:476 -; GCN-NEXT: buffer_store_dword v8, off, s[0:3], s33 offset:480 -; GCN-NEXT: buffer_store_dword v9, off, s[0:3], s33 offset:484 -; GCN-NEXT: buffer_store_dword v10, off, s[0:3], s33 offset:488 -; GCN-NEXT: buffer_store_dword v11, off, s[0:3], s33 offset:492 -; GCN-NEXT: buffer_store_dword v12, off, s[0:3], s33 offset:496 -; GCN-NEXT: buffer_store_dword v13, off, s[0:3], s33 offset:500 -; GCN-NEXT: buffer_store_dword v14, off, s[0:3], s33 offset:504 -; GCN-NEXT: buffer_store_dword v15, off, s[0:3], s33 offset:508 +; GCN-NEXT: buffer_store_dword v60, off, s[0:3], s33 offset:512 ; 4-byte Folded Spill +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_store_dword v61, off, s[0:3], s33 offset:516 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v62, off, s[0:3], s33 offset:520 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v63, off, s[0:3], s33 offset:524 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:256 +; GCN-NEXT: buffer_store_dword v4, off, s[0:3], s33 offset:260 +; GCN-NEXT: buffer_store_dword v5, off, s[0:3], s33 offset:264 +; GCN-NEXT: buffer_store_dword v6, off, s[0:3], s33 offset:268 +; GCN-NEXT: buffer_store_dword v7, off, s[0:3], s33 offset:272 +; GCN-NEXT: buffer_store_dword v8, off, s[0:3], s33 offset:276 +; GCN-NEXT: buffer_store_dword v9, off, s[0:3], s33 offset:280 +; GCN-NEXT: buffer_store_dword v10, off, s[0:3], s33 offset:284 +; GCN-NEXT: buffer_store_dword v11, off, s[0:3], s33 offset:288 +; GCN-NEXT: buffer_store_dword v12, off, s[0:3], s33 offset:292 +; GCN-NEXT: buffer_store_dword v13, off, s[0:3], s33 offset:296 +; GCN-NEXT: buffer_store_dword v14, off, s[0:3], s33 offset:300 +; GCN-NEXT: buffer_store_dword v15, off, s[0:3], s33 offset:304 +; GCN-NEXT: buffer_store_dword v16, off, s[0:3], s33 offset:308 +; GCN-NEXT: buffer_store_dword v17, off, s[0:3], s33 offset:312 +; GCN-NEXT: buffer_store_dword v18, off, s[0:3], s33 offset:316 +; GCN-NEXT: buffer_store_dword v19, off, s[0:3], s33 offset:320 +; GCN-NEXT: buffer_store_dword v20, off, s[0:3], s33 offset:324 +; GCN-NEXT: buffer_store_dword v21, off, s[0:3], s33 offset:328 +; GCN-NEXT: buffer_store_dword v22, off, s[0:3], s33 offset:332 +; GCN-NEXT: buffer_store_dword v23, off, s[0:3], s33 offset:336 +; GCN-NEXT: buffer_store_dword v24, off, s[0:3], s33 offset:340 +; GCN-NEXT: buffer_store_dword v25, off, s[0:3], s33 offset:344 +; GCN-NEXT: buffer_store_dword v26, off, s[0:3], s33 offset:348 +; GCN-NEXT: buffer_store_dword v27, off, s[0:3], s33 offset:352 +; GCN-NEXT: buffer_store_dword v28, off, s[0:3], s33 offset:356 +; GCN-NEXT: buffer_store_dword v29, off, s[0:3], s33 offset:360 +; GCN-NEXT: buffer_store_dword v30, off, s[0:3], s33 offset:364 +; GCN-NEXT: 
buffer_store_dword v31, off, s[0:3], s33 offset:368 +; GCN-NEXT: buffer_store_dword v32, off, s[0:3], s33 offset:372 +; GCN-NEXT: buffer_store_dword v33, off, s[0:3], s33 offset:376 +; GCN-NEXT: buffer_store_dword v34, off, s[0:3], s33 offset:380 +; GCN-NEXT: buffer_store_dword v35, off, s[0:3], s33 offset:384 +; GCN-NEXT: buffer_store_dword v36, off, s[0:3], s33 offset:388 +; GCN-NEXT: buffer_store_dword v37, off, s[0:3], s33 offset:392 +; GCN-NEXT: buffer_store_dword v38, off, s[0:3], s33 offset:396 +; GCN-NEXT: buffer_store_dword v48, off, s[0:3], s33 offset:400 +; GCN-NEXT: buffer_store_dword v49, off, s[0:3], s33 offset:404 +; GCN-NEXT: buffer_store_dword v50, off, s[0:3], s33 offset:408 +; GCN-NEXT: buffer_store_dword v51, off, s[0:3], s33 offset:412 +; GCN-NEXT: buffer_store_dword v52, off, s[0:3], s33 offset:416 +; GCN-NEXT: buffer_store_dword v53, off, s[0:3], s33 offset:420 +; GCN-NEXT: buffer_store_dword v54, off, s[0:3], s33 offset:424 +; GCN-NEXT: buffer_store_dword v55, off, s[0:3], s33 offset:428 +; GCN-NEXT: buffer_store_dword v39, off, s[0:3], s33 offset:432 +; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:436 +; GCN-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:440 +; GCN-NEXT: buffer_store_dword v42, off, s[0:3], s33 offset:444 +; GCN-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:448 +; GCN-NEXT: buffer_store_dword v44, off, s[0:3], s33 offset:452 +; GCN-NEXT: buffer_store_dword v45, off, s[0:3], s33 offset:456 +; GCN-NEXT: buffer_store_dword v46, off, s[0:3], s33 offset:460 +; GCN-NEXT: buffer_store_dword v56, off, s[0:3], s33 offset:464 +; GCN-NEXT: buffer_store_dword v57, off, s[0:3], s33 offset:468 +; GCN-NEXT: buffer_store_dword v58, off, s[0:3], s33 offset:472 +; GCN-NEXT: buffer_store_dword v59, off, s[0:3], s33 offset:476 +; GCN-NEXT: buffer_load_dword v3, off, s[0:3], s33 offset:528 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v4, off, s[0:3], s33 offset:532 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v5, off, s[0:3], s33 offset:536 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v6, off, s[0:3], s33 offset:540 ; 4-byte Folded Reload +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:480 +; GCN-NEXT: buffer_store_dword v4, off, s[0:3], s33 offset:484 +; GCN-NEXT: buffer_store_dword v5, off, s[0:3], s33 offset:488 +; GCN-NEXT: buffer_store_dword v6, off, s[0:3], s33 offset:492 +; GCN-NEXT: buffer_load_dword v3, off, s[0:3], s33 offset:512 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v4, off, s[0:3], s33 offset:516 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v5, off, s[0:3], s33 offset:520 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v6, off, s[0:3], s33 offset:524 ; 4-byte Folded Reload +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:496 +; GCN-NEXT: buffer_store_dword v4, off, s[0:3], s33 offset:500 +; GCN-NEXT: buffer_store_dword v5, off, s[0:3], s33 offset:504 +; GCN-NEXT: buffer_store_dword v6, off, s[0:3], s33 offset:508 ; GCN-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen ; GCN-NEXT: buffer_load_dword v63, off, s[0:3], s33 ; 4-byte Folded Reload ; GCN-NEXT: buffer_load_dword v62, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload @@ -163,14 +144,13 @@ ; GCN-NEXT: buffer_load_dword v58, off, s[0:3], s33 offset:20 ; 4-byte Folded Reload ; GCN-NEXT: buffer_load_dword v57, off, s[0:3], s33 offset:24 ; 4-byte Folded Reload ; GCN-NEXT: buffer_load_dword v56, off, s[0:3], s33 offset:28 ; 4-byte Folded 
Reload -; GCN-NEXT: buffer_load_dword v47, off, s[0:3], s33 offset:32 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v46, off, s[0:3], s33 offset:36 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v45, off, s[0:3], s33 offset:40 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v44, off, s[0:3], s33 offset:44 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v43, off, s[0:3], s33 offset:48 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v42, off, s[0:3], s33 offset:52 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v41, off, s[0:3], s33 offset:56 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v40, off, s[0:3], s33 offset:60 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v46, off, s[0:3], s33 offset:32 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v45, off, s[0:3], s33 offset:36 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v44, off, s[0:3], s33 offset:40 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v43, off, s[0:3], s33 offset:44 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v42, off, s[0:3], s33 offset:48 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v41, off, s[0:3], s33 offset:52 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v40, off, s[0:3], s33 offset:56 ; 4-byte Folded Reload ; GCN-NEXT: s_mov_b32 s33, s4 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: s_setpc_b64 s[30:31] @@ -186,14 +166,13 @@ ; GCN-NEXT: s_mov_b32 s4, s33 ; GCN-NEXT: s_add_i32 s33, s32, 0x3fc0 ; GCN-NEXT: s_and_b32 s33, s33, 0xffffc000 -; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:60 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:56 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v42, off, s[0:3], s33 offset:52 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:48 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v44, off, s[0:3], s33 offset:44 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v45, off, s[0:3], s33 offset:40 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v46, off, s[0:3], s33 offset:36 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v47, off, s[0:3], s33 offset:32 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:56 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:52 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v42, off, s[0:3], s33 offset:48 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:44 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v44, off, s[0:3], s33 offset:40 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v45, off, s[0:3], s33 offset:36 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v46, off, s[0:3], s33 offset:32 ; 4-byte Folded Spill ; GCN-NEXT: buffer_store_dword v56, off, s[0:3], s33 offset:28 ; 4-byte Folded Spill ; GCN-NEXT: buffer_store_dword v57, off, s[0:3], s33 offset:24 ; 4-byte Folded Spill ; GCN-NEXT: buffer_store_dword v58, off, s[0:3], s33 offset:20 ; 4-byte Folded Spill @@ -202,135 +181,117 @@ ; GCN-NEXT: buffer_store_dword v61, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill ; GCN-NEXT: buffer_store_dword v62, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill ; GCN-NEXT: buffer_store_dword v63, off, s[0:3], s33 ; 4-byte Folded Spill -; GCN-NEXT: v_mov_b32_e32 v6, v2 -; GCN-NEXT: global_load_dwordx4 v[2:5], v[0:1], off -; GCN-NEXT: global_load_dwordx4 v[16:19], v[0:1], off offset:16 -; GCN-NEXT: global_load_dwordx4 v[56:59], v[0:1], off offset:32 
-; GCN-NEXT: global_load_dwordx4 v[48:51], v[0:1], off offset:48 -; GCN-NEXT: global_load_dwordx4 v[20:23], v[0:1], off offset:64 -; GCN-NEXT: global_load_dwordx4 v[44:47], v[0:1], off offset:80 -; GCN-NEXT: global_load_dwordx4 v[40:43], v[0:1], off offset:96 -; GCN-NEXT: global_load_dwordx4 v[60:63], v[0:1], off offset:112 -; GCN-NEXT: global_load_dwordx4 v[36:39], v[0:1], off offset:128 -; GCN-NEXT: global_load_dwordx4 v[32:35], v[0:1], off offset:144 -; GCN-NEXT: global_load_dwordx4 v[28:31], v[0:1], off offset:160 -; GCN-NEXT: global_load_dwordx4 v[52:55], v[0:1], off offset:176 -; GCN-NEXT: global_load_dwordx4 v[24:27], v[0:1], off offset:192 -; GCN-NEXT: global_load_dwordx4 v[7:10], v[0:1], off offset:208 +; GCN-NEXT: global_load_dwordx4 v[3:6], v[0:1], off +; GCN-NEXT: global_load_dwordx4 v[7:10], v[0:1], off offset:16 +; GCN-NEXT: global_load_dwordx4 v[11:14], v[0:1], off offset:32 +; GCN-NEXT: global_load_dwordx4 v[15:18], v[0:1], off offset:48 +; GCN-NEXT: global_load_dwordx4 v[19:22], v[0:1], off offset:64 +; GCN-NEXT: global_load_dwordx4 v[23:26], v[0:1], off offset:80 +; GCN-NEXT: global_load_dwordx4 v[27:30], v[0:1], off offset:96 +; GCN-NEXT: global_load_dwordx4 v[31:34], v[0:1], off offset:112 +; GCN-NEXT: global_load_dwordx4 v[35:38], v[0:1], off offset:128 +; GCN-NEXT: global_load_dwordx4 v[48:51], v[0:1], off offset:144 +; GCN-NEXT: global_load_dwordx4 v[52:55], v[0:1], off offset:160 +; GCN-NEXT: global_load_dwordx4 v[39:42], v[0:1], off offset:176 +; GCN-NEXT: global_load_dwordx4 v[43:46], v[0:1], off offset:192 +; GCN-NEXT: global_load_dwordx4 v[56:59], v[0:1], off offset:208 +; GCN-NEXT: global_load_dwordx4 v[60:63], v[0:1], off offset:224 ; GCN-NEXT: s_add_i32 s32, s32, 0x10000 ; GCN-NEXT: s_add_i32 s32, s32, 0xffff0000 ; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:512 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v60, off, s[0:3], s33 offset:528 ; 4-byte Folded Spill ; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: buffer_store_dword v4, off, s[0:3], s33 offset:516 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v5, off, s[0:3], s33 offset:520 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v6, off, s[0:3], s33 offset:524 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v7, off, s[0:3], s33 offset:528 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v8, off, s[0:3], s33 offset:532 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v9, off, s[0:3], s33 offset:536 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v10, off, s[0:3], s33 offset:540 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v11, off, s[0:3], s33 offset:544 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v12, off, s[0:3], s33 offset:548 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v13, off, s[0:3], s33 offset:552 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v14, off, s[0:3], s33 offset:556 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v15, off, s[0:3], s33 offset:560 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v16, off, s[0:3], s33 offset:564 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v17, off, s[0:3], s33 offset:568 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v18, off, s[0:3], s33 offset:572 ; 4-byte Folded Spill -; GCN-NEXT: global_load_dwordx4 v[8:11], v[0:1], off offset:224 -; GCN-NEXT: global_load_dwordx4 v[12:15], v[0:1], off offset:240 -; GCN-NEXT: buffer_store_dword v2, off, s[0:3], s33 offset:256 -; GCN-NEXT: buffer_store_dword v3, off, s[0:3], 
s33 offset:260 -; GCN-NEXT: buffer_store_dword v4, off, s[0:3], s33 offset:264 -; GCN-NEXT: buffer_store_dword v5, off, s[0:3], s33 offset:268 -; GCN-NEXT: buffer_store_dword v16, off, s[0:3], s33 offset:272 -; GCN-NEXT: buffer_store_dword v17, off, s[0:3], s33 offset:276 -; GCN-NEXT: buffer_store_dword v18, off, s[0:3], s33 offset:280 -; GCN-NEXT: buffer_store_dword v19, off, s[0:3], s33 offset:284 -; GCN-NEXT: buffer_store_dword v56, off, s[0:3], s33 offset:288 -; GCN-NEXT: buffer_store_dword v57, off, s[0:3], s33 offset:292 -; GCN-NEXT: buffer_store_dword v58, off, s[0:3], s33 offset:296 -; GCN-NEXT: buffer_store_dword v59, off, s[0:3], s33 offset:300 -; GCN-NEXT: buffer_store_dword v48, off, s[0:3], s33 offset:304 -; GCN-NEXT: buffer_store_dword v49, off, s[0:3], s33 offset:308 -; GCN-NEXT: buffer_store_dword v50, off, s[0:3], s33 offset:312 -; GCN-NEXT: buffer_store_dword v51, off, s[0:3], s33 offset:316 -; GCN-NEXT: buffer_store_dword v20, off, s[0:3], s33 offset:320 -; GCN-NEXT: buffer_store_dword v21, off, s[0:3], s33 offset:324 -; GCN-NEXT: buffer_store_dword v22, off, s[0:3], s33 offset:328 -; GCN-NEXT: buffer_store_dword v23, off, s[0:3], s33 offset:332 -; GCN-NEXT: buffer_store_dword v44, off, s[0:3], s33 offset:336 -; GCN-NEXT: buffer_store_dword v45, off, s[0:3], s33 offset:340 -; GCN-NEXT: buffer_store_dword v46, off, s[0:3], s33 offset:344 -; GCN-NEXT: buffer_store_dword v47, off, s[0:3], s33 offset:348 -; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:352 -; GCN-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:356 -; GCN-NEXT: buffer_store_dword v42, off, s[0:3], s33 offset:360 -; GCN-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:364 -; GCN-NEXT: buffer_store_dword v60, off, s[0:3], s33 offset:368 -; GCN-NEXT: buffer_store_dword v61, off, s[0:3], s33 offset:372 -; GCN-NEXT: buffer_store_dword v62, off, s[0:3], s33 offset:376 -; GCN-NEXT: buffer_store_dword v63, off, s[0:3], s33 offset:380 -; GCN-NEXT: buffer_store_dword v36, off, s[0:3], s33 offset:384 -; GCN-NEXT: buffer_store_dword v37, off, s[0:3], s33 offset:388 -; GCN-NEXT: buffer_store_dword v38, off, s[0:3], s33 offset:392 -; GCN-NEXT: buffer_store_dword v39, off, s[0:3], s33 offset:396 -; GCN-NEXT: buffer_store_dword v32, off, s[0:3], s33 offset:400 -; GCN-NEXT: buffer_store_dword v33, off, s[0:3], s33 offset:404 -; GCN-NEXT: buffer_store_dword v34, off, s[0:3], s33 offset:408 -; GCN-NEXT: buffer_store_dword v35, off, s[0:3], s33 offset:412 -; GCN-NEXT: buffer_store_dword v28, off, s[0:3], s33 offset:416 -; GCN-NEXT: buffer_store_dword v29, off, s[0:3], s33 offset:420 -; GCN-NEXT: buffer_store_dword v30, off, s[0:3], s33 offset:424 -; GCN-NEXT: buffer_store_dword v31, off, s[0:3], s33 offset:428 -; GCN-NEXT: buffer_store_dword v52, off, s[0:3], s33 offset:432 -; GCN-NEXT: buffer_store_dword v53, off, s[0:3], s33 offset:436 -; GCN-NEXT: buffer_store_dword v54, off, s[0:3], s33 offset:440 -; GCN-NEXT: buffer_store_dword v55, off, s[0:3], s33 offset:444 -; GCN-NEXT: buffer_store_dword v24, off, s[0:3], s33 offset:448 -; GCN-NEXT: buffer_store_dword v25, off, s[0:3], s33 offset:452 -; GCN-NEXT: buffer_store_dword v26, off, s[0:3], s33 offset:456 -; GCN-NEXT: buffer_store_dword v27, off, s[0:3], s33 offset:460 -; GCN-NEXT: buffer_load_dword v16, off, s[0:3], s33 offset:512 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v17, off, s[0:3], s33 offset:516 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v18, off, s[0:3], s33 offset:520 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword 
v19, off, s[0:3], s33 offset:524 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v20, off, s[0:3], s33 offset:528 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v21, off, s[0:3], s33 offset:532 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v22, off, s[0:3], s33 offset:536 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v23, off, s[0:3], s33 offset:540 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v24, off, s[0:3], s33 offset:544 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v25, off, s[0:3], s33 offset:548 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v26, off, s[0:3], s33 offset:552 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v27, off, s[0:3], s33 offset:556 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v28, off, s[0:3], s33 offset:560 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v29, off, s[0:3], s33 offset:564 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v30, off, s[0:3], s33 offset:568 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s33 offset:572 ; 4-byte Folded Reload -; GCN-NEXT: v_bfe_u32 v0, v6, 1, 6 -; GCN-NEXT: v_lshrrev_b32_e64 v2, 6, s33 +; GCN-NEXT: buffer_store_dword v61, off, s[0:3], s33 offset:532 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v62, off, s[0:3], s33 offset:536 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v63, off, s[0:3], s33 offset:540 ; 4-byte Folded Spill +; GCN-NEXT: global_load_dwordx4 v[60:63], v[0:1], off offset:240 +; GCN-NEXT: v_bfe_u32 v0, v2, 1, 6 ; GCN-NEXT: v_lshlrev_b32_e32 v0, 2, v0 -; GCN-NEXT: v_add_u32_e32 v2, 0x100, v2 -; GCN-NEXT: v_add_u32_e32 v0, v2, v0 -; GCN-NEXT: v_and_b32_e32 v1, 1, v6 +; GCN-NEXT: v_and_b32_e32 v1, 1, v2 ; GCN-NEXT: v_lshlrev_b32_e32 v1, 4, v1 ; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: v_mov_b32_e32 v16, v20 -; GCN-NEXT: v_mov_b32_e32 v17, v21 -; GCN-NEXT: v_mov_b32_e32 v18, v22 -; GCN-NEXT: v_mov_b32_e32 v19, v23 -; GCN-NEXT: buffer_store_dword v16, off, s[0:3], s33 offset:464 -; GCN-NEXT: buffer_store_dword v17, off, s[0:3], s33 offset:468 -; GCN-NEXT: buffer_store_dword v18, off, s[0:3], s33 offset:472 -; GCN-NEXT: buffer_store_dword v19, off, s[0:3], s33 offset:476 -; GCN-NEXT: buffer_store_dword v8, off, s[0:3], s33 offset:480 -; GCN-NEXT: buffer_store_dword v9, off, s[0:3], s33 offset:484 -; GCN-NEXT: buffer_store_dword v10, off, s[0:3], s33 offset:488 -; GCN-NEXT: buffer_store_dword v11, off, s[0:3], s33 offset:492 -; GCN-NEXT: buffer_store_dword v12, off, s[0:3], s33 offset:496 -; GCN-NEXT: buffer_store_dword v13, off, s[0:3], s33 offset:500 -; GCN-NEXT: buffer_store_dword v14, off, s[0:3], s33 offset:504 -; GCN-NEXT: buffer_store_dword v15, off, s[0:3], s33 offset:508 +; GCN-NEXT: buffer_store_dword v60, off, s[0:3], s33 offset:512 ; 4-byte Folded Spill +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_store_dword v61, off, s[0:3], s33 offset:516 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v62, off, s[0:3], s33 offset:520 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v63, off, s[0:3], s33 offset:524 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:256 +; GCN-NEXT: buffer_store_dword v4, off, s[0:3], s33 offset:260 +; GCN-NEXT: buffer_store_dword v5, off, s[0:3], s33 offset:264 +; GCN-NEXT: buffer_store_dword v6, off, s[0:3], s33 offset:268 +; GCN-NEXT: buffer_store_dword v7, off, s[0:3], s33 offset:272 +; GCN-NEXT: buffer_store_dword v8, off, s[0:3], s33 offset:276 +; GCN-NEXT: buffer_store_dword v9, off, s[0:3], s33 offset:280 
+; GCN-NEXT: buffer_store_dword v10, off, s[0:3], s33 offset:284 +; GCN-NEXT: buffer_store_dword v11, off, s[0:3], s33 offset:288 +; GCN-NEXT: buffer_store_dword v12, off, s[0:3], s33 offset:292 +; GCN-NEXT: buffer_store_dword v13, off, s[0:3], s33 offset:296 +; GCN-NEXT: buffer_store_dword v14, off, s[0:3], s33 offset:300 +; GCN-NEXT: buffer_store_dword v15, off, s[0:3], s33 offset:304 +; GCN-NEXT: buffer_store_dword v16, off, s[0:3], s33 offset:308 +; GCN-NEXT: buffer_store_dword v17, off, s[0:3], s33 offset:312 +; GCN-NEXT: buffer_store_dword v18, off, s[0:3], s33 offset:316 +; GCN-NEXT: buffer_store_dword v19, off, s[0:3], s33 offset:320 +; GCN-NEXT: buffer_store_dword v20, off, s[0:3], s33 offset:324 +; GCN-NEXT: buffer_store_dword v21, off, s[0:3], s33 offset:328 +; GCN-NEXT: buffer_store_dword v22, off, s[0:3], s33 offset:332 +; GCN-NEXT: buffer_store_dword v23, off, s[0:3], s33 offset:336 +; GCN-NEXT: buffer_store_dword v24, off, s[0:3], s33 offset:340 +; GCN-NEXT: buffer_store_dword v25, off, s[0:3], s33 offset:344 +; GCN-NEXT: buffer_store_dword v26, off, s[0:3], s33 offset:348 +; GCN-NEXT: buffer_store_dword v27, off, s[0:3], s33 offset:352 +; GCN-NEXT: buffer_store_dword v28, off, s[0:3], s33 offset:356 +; GCN-NEXT: buffer_store_dword v29, off, s[0:3], s33 offset:360 +; GCN-NEXT: buffer_store_dword v30, off, s[0:3], s33 offset:364 +; GCN-NEXT: buffer_store_dword v31, off, s[0:3], s33 offset:368 +; GCN-NEXT: buffer_store_dword v32, off, s[0:3], s33 offset:372 +; GCN-NEXT: buffer_store_dword v33, off, s[0:3], s33 offset:376 +; GCN-NEXT: buffer_store_dword v34, off, s[0:3], s33 offset:380 +; GCN-NEXT: buffer_store_dword v35, off, s[0:3], s33 offset:384 +; GCN-NEXT: buffer_store_dword v36, off, s[0:3], s33 offset:388 +; GCN-NEXT: buffer_store_dword v37, off, s[0:3], s33 offset:392 +; GCN-NEXT: buffer_store_dword v38, off, s[0:3], s33 offset:396 +; GCN-NEXT: buffer_store_dword v48, off, s[0:3], s33 offset:400 +; GCN-NEXT: buffer_store_dword v49, off, s[0:3], s33 offset:404 +; GCN-NEXT: buffer_store_dword v50, off, s[0:3], s33 offset:408 +; GCN-NEXT: buffer_store_dword v51, off, s[0:3], s33 offset:412 +; GCN-NEXT: buffer_store_dword v52, off, s[0:3], s33 offset:416 +; GCN-NEXT: buffer_store_dword v53, off, s[0:3], s33 offset:420 +; GCN-NEXT: buffer_store_dword v54, off, s[0:3], s33 offset:424 +; GCN-NEXT: buffer_store_dword v55, off, s[0:3], s33 offset:428 +; GCN-NEXT: buffer_store_dword v39, off, s[0:3], s33 offset:432 +; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:436 +; GCN-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:440 +; GCN-NEXT: buffer_store_dword v42, off, s[0:3], s33 offset:444 +; GCN-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:448 +; GCN-NEXT: buffer_store_dword v44, off, s[0:3], s33 offset:452 +; GCN-NEXT: buffer_store_dword v45, off, s[0:3], s33 offset:456 +; GCN-NEXT: buffer_store_dword v46, off, s[0:3], s33 offset:460 +; GCN-NEXT: buffer_store_dword v56, off, s[0:3], s33 offset:464 +; GCN-NEXT: buffer_store_dword v57, off, s[0:3], s33 offset:468 +; GCN-NEXT: buffer_store_dword v58, off, s[0:3], s33 offset:472 +; GCN-NEXT: buffer_store_dword v59, off, s[0:3], s33 offset:476 +; GCN-NEXT: buffer_load_dword v3, off, s[0:3], s33 offset:528 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v4, off, s[0:3], s33 offset:532 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v5, off, s[0:3], s33 offset:536 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v6, off, s[0:3], s33 offset:540 ; 4-byte Folded Reload +; GCN-NEXT: s_waitcnt 
vmcnt(0) +; GCN-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:480 +; GCN-NEXT: buffer_store_dword v4, off, s[0:3], s33 offset:484 +; GCN-NEXT: buffer_store_dword v5, off, s[0:3], s33 offset:488 +; GCN-NEXT: buffer_store_dword v6, off, s[0:3], s33 offset:492 +; GCN-NEXT: buffer_load_dword v3, off, s[0:3], s33 offset:512 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v4, off, s[0:3], s33 offset:516 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v5, off, s[0:3], s33 offset:520 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v6, off, s[0:3], s33 offset:524 ; 4-byte Folded Reload +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:496 +; GCN-NEXT: buffer_store_dword v4, off, s[0:3], s33 offset:500 +; GCN-NEXT: buffer_store_dword v5, off, s[0:3], s33 offset:504 +; GCN-NEXT: buffer_store_dword v6, off, s[0:3], s33 offset:508 +; GCN-NEXT: v_lshrrev_b32_e64 v3, 6, s33 +; GCN-NEXT: v_add_u32_e32 v3, 0x100, v3 +; GCN-NEXT: v_add_u32_e32 v0, v3, v0 ; GCN-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen ; GCN-NEXT: buffer_load_dword v63, off, s[0:3], s33 ; 4-byte Folded Reload ; GCN-NEXT: buffer_load_dword v62, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload @@ -340,16 +301,15 @@ ; GCN-NEXT: buffer_load_dword v58, off, s[0:3], s33 offset:20 ; 4-byte Folded Reload ; GCN-NEXT: buffer_load_dword v57, off, s[0:3], s33 offset:24 ; 4-byte Folded Reload ; GCN-NEXT: buffer_load_dword v56, off, s[0:3], s33 offset:28 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v47, off, s[0:3], s33 offset:32 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v46, off, s[0:3], s33 offset:36 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v45, off, s[0:3], s33 offset:40 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v44, off, s[0:3], s33 offset:44 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v43, off, s[0:3], s33 offset:48 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v42, off, s[0:3], s33 offset:52 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v41, off, s[0:3], s33 offset:56 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v40, off, s[0:3], s33 offset:60 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v46, off, s[0:3], s33 offset:32 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v45, off, s[0:3], s33 offset:36 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v44, off, s[0:3], s33 offset:40 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v43, off, s[0:3], s33 offset:44 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v42, off, s[0:3], s33 offset:48 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v41, off, s[0:3], s33 offset:52 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v40, off, s[0:3], s33 offset:56 ; 4-byte Folded Reload ; GCN-NEXT: s_mov_b32 s33, s4 -; GCN-NEXT: s_waitcnt vmcnt(16) +; GCN-NEXT: s_waitcnt vmcnt(15) ; GCN-NEXT: v_lshrrev_b32_e32 v0, v1, v0 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: s_setpc_b64 s[30:31] @@ -365,14 +325,13 @@ ; GCN-NEXT: s_mov_b32 s4, s33 ; GCN-NEXT: s_add_i32 s33, s32, 0x3fc0 ; GCN-NEXT: s_and_b32 s33, s33, 0xffffc000 -; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:60 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:56 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v42, off, s[0:3], s33 offset:52 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:48 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v44, off, s[0:3], s33 offset:44 ; 4-byte 
Folded Spill -; GCN-NEXT: buffer_store_dword v45, off, s[0:3], s33 offset:40 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v46, off, s[0:3], s33 offset:36 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v47, off, s[0:3], s33 offset:32 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:56 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:52 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v42, off, s[0:3], s33 offset:48 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:44 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v44, off, s[0:3], s33 offset:40 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v45, off, s[0:3], s33 offset:36 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v46, off, s[0:3], s33 offset:32 ; 4-byte Folded Spill ; GCN-NEXT: buffer_store_dword v56, off, s[0:3], s33 offset:28 ; 4-byte Folded Spill ; GCN-NEXT: buffer_store_dword v57, off, s[0:3], s33 offset:24 ; 4-byte Folded Spill ; GCN-NEXT: buffer_store_dword v58, off, s[0:3], s33 offset:20 ; 4-byte Folded Spill @@ -381,133 +340,115 @@ ; GCN-NEXT: buffer_store_dword v61, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill ; GCN-NEXT: buffer_store_dword v62, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill ; GCN-NEXT: buffer_store_dword v63, off, s[0:3], s33 ; 4-byte Folded Spill -; GCN-NEXT: v_mov_b32_e32 v6, v2 -; GCN-NEXT: global_load_dwordx4 v[2:5], v[0:1], off -; GCN-NEXT: global_load_dwordx4 v[16:19], v[0:1], off offset:16 -; GCN-NEXT: global_load_dwordx4 v[56:59], v[0:1], off offset:32 -; GCN-NEXT: global_load_dwordx4 v[48:51], v[0:1], off offset:48 -; GCN-NEXT: global_load_dwordx4 v[20:23], v[0:1], off offset:64 -; GCN-NEXT: global_load_dwordx4 v[44:47], v[0:1], off offset:80 -; GCN-NEXT: global_load_dwordx4 v[40:43], v[0:1], off offset:96 -; GCN-NEXT: global_load_dwordx4 v[60:63], v[0:1], off offset:112 -; GCN-NEXT: global_load_dwordx4 v[36:39], v[0:1], off offset:128 -; GCN-NEXT: global_load_dwordx4 v[32:35], v[0:1], off offset:144 -; GCN-NEXT: global_load_dwordx4 v[28:31], v[0:1], off offset:160 -; GCN-NEXT: global_load_dwordx4 v[52:55], v[0:1], off offset:176 -; GCN-NEXT: global_load_dwordx4 v[24:27], v[0:1], off offset:192 -; GCN-NEXT: global_load_dwordx4 v[7:10], v[0:1], off offset:208 +; GCN-NEXT: global_load_dwordx4 v[3:6], v[0:1], off +; GCN-NEXT: global_load_dwordx4 v[7:10], v[0:1], off offset:16 +; GCN-NEXT: global_load_dwordx4 v[11:14], v[0:1], off offset:32 +; GCN-NEXT: global_load_dwordx4 v[15:18], v[0:1], off offset:48 +; GCN-NEXT: global_load_dwordx4 v[19:22], v[0:1], off offset:64 +; GCN-NEXT: global_load_dwordx4 v[23:26], v[0:1], off offset:80 +; GCN-NEXT: global_load_dwordx4 v[27:30], v[0:1], off offset:96 +; GCN-NEXT: global_load_dwordx4 v[31:34], v[0:1], off offset:112 +; GCN-NEXT: global_load_dwordx4 v[35:38], v[0:1], off offset:128 +; GCN-NEXT: global_load_dwordx4 v[48:51], v[0:1], off offset:144 +; GCN-NEXT: global_load_dwordx4 v[52:55], v[0:1], off offset:160 +; GCN-NEXT: global_load_dwordx4 v[39:42], v[0:1], off offset:176 +; GCN-NEXT: global_load_dwordx4 v[43:46], v[0:1], off offset:192 +; GCN-NEXT: global_load_dwordx4 v[56:59], v[0:1], off offset:208 +; GCN-NEXT: global_load_dwordx4 v[60:63], v[0:1], off offset:224 ; GCN-NEXT: s_add_i32 s32, s32, 0x10000 ; GCN-NEXT: s_add_i32 s32, s32, 0xffff0000 ; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:512 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v60, off, s[0:3], 
s33 offset:528 ; 4-byte Folded Spill ; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: buffer_store_dword v4, off, s[0:3], s33 offset:516 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v5, off, s[0:3], s33 offset:520 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v6, off, s[0:3], s33 offset:524 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v7, off, s[0:3], s33 offset:528 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v8, off, s[0:3], s33 offset:532 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v9, off, s[0:3], s33 offset:536 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v10, off, s[0:3], s33 offset:540 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v11, off, s[0:3], s33 offset:544 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v12, off, s[0:3], s33 offset:548 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v13, off, s[0:3], s33 offset:552 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v14, off, s[0:3], s33 offset:556 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v15, off, s[0:3], s33 offset:560 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v16, off, s[0:3], s33 offset:564 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v17, off, s[0:3], s33 offset:568 ; 4-byte Folded Spill -; GCN-NEXT: buffer_store_dword v18, off, s[0:3], s33 offset:572 ; 4-byte Folded Spill -; GCN-NEXT: global_load_dwordx4 v[8:11], v[0:1], off offset:224 -; GCN-NEXT: global_load_dwordx4 v[12:15], v[0:1], off offset:240 -; GCN-NEXT: buffer_store_dword v2, off, s[0:3], s33 offset:256 -; GCN-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:260 -; GCN-NEXT: buffer_store_dword v4, off, s[0:3], s33 offset:264 -; GCN-NEXT: buffer_store_dword v5, off, s[0:3], s33 offset:268 -; GCN-NEXT: buffer_store_dword v16, off, s[0:3], s33 offset:272 -; GCN-NEXT: buffer_store_dword v17, off, s[0:3], s33 offset:276 -; GCN-NEXT: buffer_store_dword v18, off, s[0:3], s33 offset:280 -; GCN-NEXT: buffer_store_dword v19, off, s[0:3], s33 offset:284 -; GCN-NEXT: buffer_store_dword v56, off, s[0:3], s33 offset:288 -; GCN-NEXT: buffer_store_dword v57, off, s[0:3], s33 offset:292 -; GCN-NEXT: buffer_store_dword v58, off, s[0:3], s33 offset:296 -; GCN-NEXT: buffer_store_dword v59, off, s[0:3], s33 offset:300 -; GCN-NEXT: buffer_store_dword v48, off, s[0:3], s33 offset:304 -; GCN-NEXT: buffer_store_dword v49, off, s[0:3], s33 offset:308 -; GCN-NEXT: buffer_store_dword v50, off, s[0:3], s33 offset:312 -; GCN-NEXT: buffer_store_dword v51, off, s[0:3], s33 offset:316 -; GCN-NEXT: buffer_store_dword v20, off, s[0:3], s33 offset:320 -; GCN-NEXT: buffer_store_dword v21, off, s[0:3], s33 offset:324 -; GCN-NEXT: buffer_store_dword v22, off, s[0:3], s33 offset:328 -; GCN-NEXT: buffer_store_dword v23, off, s[0:3], s33 offset:332 -; GCN-NEXT: buffer_store_dword v44, off, s[0:3], s33 offset:336 -; GCN-NEXT: buffer_store_dword v45, off, s[0:3], s33 offset:340 -; GCN-NEXT: buffer_store_dword v46, off, s[0:3], s33 offset:344 -; GCN-NEXT: buffer_store_dword v47, off, s[0:3], s33 offset:348 -; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:352 -; GCN-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:356 -; GCN-NEXT: buffer_store_dword v42, off, s[0:3], s33 offset:360 -; GCN-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:364 -; GCN-NEXT: buffer_store_dword v60, off, s[0:3], s33 offset:368 -; GCN-NEXT: buffer_store_dword v61, off, s[0:3], s33 offset:372 -; GCN-NEXT: buffer_store_dword v62, off, s[0:3], s33 offset:376 -; GCN-NEXT: buffer_store_dword v63, off, 
s[0:3], s33 offset:380 -; GCN-NEXT: buffer_store_dword v36, off, s[0:3], s33 offset:384 -; GCN-NEXT: buffer_store_dword v37, off, s[0:3], s33 offset:388 -; GCN-NEXT: buffer_store_dword v38, off, s[0:3], s33 offset:392 -; GCN-NEXT: buffer_store_dword v39, off, s[0:3], s33 offset:396 -; GCN-NEXT: buffer_store_dword v32, off, s[0:3], s33 offset:400 -; GCN-NEXT: buffer_store_dword v33, off, s[0:3], s33 offset:404 -; GCN-NEXT: buffer_store_dword v34, off, s[0:3], s33 offset:408 -; GCN-NEXT: buffer_store_dword v35, off, s[0:3], s33 offset:412 -; GCN-NEXT: buffer_store_dword v28, off, s[0:3], s33 offset:416 -; GCN-NEXT: buffer_store_dword v29, off, s[0:3], s33 offset:420 -; GCN-NEXT: buffer_store_dword v30, off, s[0:3], s33 offset:424 -; GCN-NEXT: buffer_store_dword v31, off, s[0:3], s33 offset:428 -; GCN-NEXT: buffer_store_dword v52, off, s[0:3], s33 offset:432 -; GCN-NEXT: buffer_store_dword v53, off, s[0:3], s33 offset:436 -; GCN-NEXT: buffer_store_dword v54, off, s[0:3], s33 offset:440 -; GCN-NEXT: buffer_store_dword v55, off, s[0:3], s33 offset:444 -; GCN-NEXT: buffer_store_dword v24, off, s[0:3], s33 offset:448 -; GCN-NEXT: buffer_store_dword v25, off, s[0:3], s33 offset:452 -; GCN-NEXT: buffer_store_dword v26, off, s[0:3], s33 offset:456 -; GCN-NEXT: buffer_store_dword v27, off, s[0:3], s33 offset:460 -; GCN-NEXT: buffer_load_dword v16, off, s[0:3], s33 offset:512 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v17, off, s[0:3], s33 offset:516 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v18, off, s[0:3], s33 offset:520 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v19, off, s[0:3], s33 offset:524 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v20, off, s[0:3], s33 offset:528 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v21, off, s[0:3], s33 offset:532 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v22, off, s[0:3], s33 offset:536 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v23, off, s[0:3], s33 offset:540 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v24, off, s[0:3], s33 offset:544 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v25, off, s[0:3], s33 offset:548 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v26, off, s[0:3], s33 offset:552 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v27, off, s[0:3], s33 offset:556 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v28, off, s[0:3], s33 offset:560 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v29, off, s[0:3], s33 offset:564 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v30, off, s[0:3], s33 offset:568 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s33 offset:572 ; 4-byte Folded Reload -; GCN-NEXT: v_and_b32_e32 v0, 31, v6 +; GCN-NEXT: buffer_store_dword v61, off, s[0:3], s33 offset:532 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v62, off, s[0:3], s33 offset:536 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v63, off, s[0:3], s33 offset:540 ; 4-byte Folded Spill +; GCN-NEXT: global_load_dwordx4 v[60:63], v[0:1], off offset:240 +; GCN-NEXT: v_and_b32_e32 v0, 31, v2 ; GCN-NEXT: v_lshrrev_b32_e64 v2, 6, s33 ; GCN-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GCN-NEXT: v_add_u32_e32 v2, 0x100, v2 ; GCN-NEXT: v_add_u32_e32 v1, v2, v0 ; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: v_mov_b32_e32 v16, v20 -; GCN-NEXT: v_mov_b32_e32 v17, v21 -; GCN-NEXT: v_mov_b32_e32 v18, v22 -; GCN-NEXT: v_mov_b32_e32 v19, v23 -; GCN-NEXT: buffer_store_dword v16, off, s[0:3], s33 offset:464 -; GCN-NEXT: buffer_store_dword v17, 
off, s[0:3], s33 offset:468 -; GCN-NEXT: buffer_store_dword v18, off, s[0:3], s33 offset:472 -; GCN-NEXT: buffer_store_dword v19, off, s[0:3], s33 offset:476 -; GCN-NEXT: buffer_store_dword v8, off, s[0:3], s33 offset:480 -; GCN-NEXT: buffer_store_dword v9, off, s[0:3], s33 offset:484 -; GCN-NEXT: buffer_store_dword v10, off, s[0:3], s33 offset:488 -; GCN-NEXT: buffer_store_dword v11, off, s[0:3], s33 offset:492 -; GCN-NEXT: buffer_store_dword v12, off, s[0:3], s33 offset:496 -; GCN-NEXT: buffer_store_dword v13, off, s[0:3], s33 offset:500 -; GCN-NEXT: buffer_store_dword v14, off, s[0:3], s33 offset:504 -; GCN-NEXT: buffer_store_dword v15, off, s[0:3], s33 offset:508 +; GCN-NEXT: buffer_store_dword v60, off, s[0:3], s33 offset:512 ; 4-byte Folded Spill +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_store_dword v61, off, s[0:3], s33 offset:516 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v62, off, s[0:3], s33 offset:520 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v63, off, s[0:3], s33 offset:524 ; 4-byte Folded Spill +; GCN-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:256 +; GCN-NEXT: buffer_store_dword v4, off, s[0:3], s33 offset:260 +; GCN-NEXT: buffer_store_dword v5, off, s[0:3], s33 offset:264 +; GCN-NEXT: buffer_store_dword v6, off, s[0:3], s33 offset:268 +; GCN-NEXT: buffer_store_dword v7, off, s[0:3], s33 offset:272 +; GCN-NEXT: buffer_store_dword v8, off, s[0:3], s33 offset:276 +; GCN-NEXT: buffer_store_dword v9, off, s[0:3], s33 offset:280 +; GCN-NEXT: buffer_store_dword v10, off, s[0:3], s33 offset:284 +; GCN-NEXT: buffer_store_dword v11, off, s[0:3], s33 offset:288 +; GCN-NEXT: buffer_store_dword v12, off, s[0:3], s33 offset:292 +; GCN-NEXT: buffer_store_dword v13, off, s[0:3], s33 offset:296 +; GCN-NEXT: buffer_store_dword v14, off, s[0:3], s33 offset:300 +; GCN-NEXT: buffer_store_dword v15, off, s[0:3], s33 offset:304 +; GCN-NEXT: buffer_store_dword v16, off, s[0:3], s33 offset:308 +; GCN-NEXT: buffer_store_dword v17, off, s[0:3], s33 offset:312 +; GCN-NEXT: buffer_store_dword v18, off, s[0:3], s33 offset:316 +; GCN-NEXT: buffer_store_dword v19, off, s[0:3], s33 offset:320 +; GCN-NEXT: buffer_store_dword v20, off, s[0:3], s33 offset:324 +; GCN-NEXT: buffer_store_dword v21, off, s[0:3], s33 offset:328 +; GCN-NEXT: buffer_store_dword v22, off, s[0:3], s33 offset:332 +; GCN-NEXT: buffer_store_dword v23, off, s[0:3], s33 offset:336 +; GCN-NEXT: buffer_store_dword v24, off, s[0:3], s33 offset:340 +; GCN-NEXT: buffer_store_dword v25, off, s[0:3], s33 offset:344 +; GCN-NEXT: buffer_store_dword v26, off, s[0:3], s33 offset:348 +; GCN-NEXT: buffer_store_dword v27, off, s[0:3], s33 offset:352 +; GCN-NEXT: buffer_store_dword v28, off, s[0:3], s33 offset:356 +; GCN-NEXT: buffer_store_dword v29, off, s[0:3], s33 offset:360 +; GCN-NEXT: buffer_store_dword v30, off, s[0:3], s33 offset:364 +; GCN-NEXT: buffer_store_dword v31, off, s[0:3], s33 offset:368 +; GCN-NEXT: buffer_store_dword v32, off, s[0:3], s33 offset:372 +; GCN-NEXT: buffer_store_dword v33, off, s[0:3], s33 offset:376 +; GCN-NEXT: buffer_store_dword v34, off, s[0:3], s33 offset:380 +; GCN-NEXT: buffer_store_dword v35, off, s[0:3], s33 offset:384 +; GCN-NEXT: buffer_store_dword v36, off, s[0:3], s33 offset:388 +; GCN-NEXT: buffer_store_dword v37, off, s[0:3], s33 offset:392 +; GCN-NEXT: buffer_store_dword v38, off, s[0:3], s33 offset:396 +; GCN-NEXT: buffer_store_dword v48, off, s[0:3], s33 offset:400 +; GCN-NEXT: buffer_store_dword v49, off, s[0:3], s33 offset:404 +; GCN-NEXT: buffer_store_dword 
v50, off, s[0:3], s33 offset:408 +; GCN-NEXT: buffer_store_dword v51, off, s[0:3], s33 offset:412 +; GCN-NEXT: buffer_store_dword v52, off, s[0:3], s33 offset:416 +; GCN-NEXT: buffer_store_dword v53, off, s[0:3], s33 offset:420 +; GCN-NEXT: buffer_store_dword v54, off, s[0:3], s33 offset:424 +; GCN-NEXT: buffer_store_dword v55, off, s[0:3], s33 offset:428 +; GCN-NEXT: buffer_store_dword v39, off, s[0:3], s33 offset:432 +; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:436 +; GCN-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:440 +; GCN-NEXT: buffer_store_dword v42, off, s[0:3], s33 offset:444 +; GCN-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:448 +; GCN-NEXT: buffer_store_dword v44, off, s[0:3], s33 offset:452 +; GCN-NEXT: buffer_store_dword v45, off, s[0:3], s33 offset:456 +; GCN-NEXT: buffer_store_dword v46, off, s[0:3], s33 offset:460 +; GCN-NEXT: buffer_store_dword v56, off, s[0:3], s33 offset:464 +; GCN-NEXT: buffer_store_dword v57, off, s[0:3], s33 offset:468 +; GCN-NEXT: buffer_store_dword v58, off, s[0:3], s33 offset:472 +; GCN-NEXT: buffer_store_dword v59, off, s[0:3], s33 offset:476 +; GCN-NEXT: buffer_load_dword v3, off, s[0:3], s33 offset:528 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v4, off, s[0:3], s33 offset:532 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v5, off, s[0:3], s33 offset:536 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v6, off, s[0:3], s33 offset:540 ; 4-byte Folded Reload +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:480 +; GCN-NEXT: buffer_store_dword v4, off, s[0:3], s33 offset:484 +; GCN-NEXT: buffer_store_dword v5, off, s[0:3], s33 offset:488 +; GCN-NEXT: buffer_store_dword v6, off, s[0:3], s33 offset:492 +; GCN-NEXT: buffer_load_dword v3, off, s[0:3], s33 offset:512 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v4, off, s[0:3], s33 offset:516 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v5, off, s[0:3], s33 offset:520 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v6, off, s[0:3], s33 offset:524 ; 4-byte Folded Reload +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:496 +; GCN-NEXT: buffer_store_dword v4, off, s[0:3], s33 offset:500 +; GCN-NEXT: buffer_store_dword v5, off, s[0:3], s33 offset:504 +; GCN-NEXT: buffer_store_dword v6, off, s[0:3], s33 offset:508 ; GCN-NEXT: buffer_load_dword v0, v1, s[0:3], 0 offen ; GCN-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:4 ; GCN-NEXT: buffer_load_dword v63, off, s[0:3], s33 ; 4-byte Folded Reload @@ -518,14 +459,13 @@ ; GCN-NEXT: buffer_load_dword v58, off, s[0:3], s33 offset:20 ; 4-byte Folded Reload ; GCN-NEXT: buffer_load_dword v57, off, s[0:3], s33 offset:24 ; 4-byte Folded Reload ; GCN-NEXT: buffer_load_dword v56, off, s[0:3], s33 offset:28 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v47, off, s[0:3], s33 offset:32 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v46, off, s[0:3], s33 offset:36 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v45, off, s[0:3], s33 offset:40 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v44, off, s[0:3], s33 offset:44 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v43, off, s[0:3], s33 offset:48 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v42, off, s[0:3], s33 offset:52 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v41, off, s[0:3], s33 offset:56 ; 4-byte Folded Reload -; GCN-NEXT: buffer_load_dword v40, off, s[0:3], s33 offset:60 ; 4-byte Folded 
Reload +; GCN-NEXT: buffer_load_dword v46, off, s[0:3], s33 offset:32 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v45, off, s[0:3], s33 offset:36 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v44, off, s[0:3], s33 offset:40 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v43, off, s[0:3], s33 offset:44 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v42, off, s[0:3], s33 offset:48 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v41, off, s[0:3], s33 offset:52 ; 4-byte Folded Reload +; GCN-NEXT: buffer_load_dword v40, off, s[0:3], s33 offset:56 ; 4-byte Folded Reload ; GCN-NEXT: s_mov_b32 s33, s4 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: s_setpc_b64 s[30:31] diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.i128.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.i128.ll --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.i128.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.i128.ll @@ -50,15 +50,17 @@ ; GFX9-NEXT: s_set_gpr_idx_on s2, gpr_idx(SRC0) ; GFX9-NEXT: v_mov_b32_e32 v0, v2 ; GFX9-NEXT: v_mov_b32_e32 v1, v3 -; GFX9-NEXT: v_mov_b32_e32 v18, v2 ; GFX9-NEXT: s_set_gpr_idx_off ; GFX9-NEXT: v_readfirstlane_b32 s0, v0 +; GFX9-NEXT: s_set_gpr_idx_on s2, gpr_idx(SRC0) +; GFX9-NEXT: v_mov_b32_e32 v0, v2 +; GFX9-NEXT: s_set_gpr_idx_off ; GFX9-NEXT: v_readfirstlane_b32 s1, v1 ; GFX9-NEXT: s_set_gpr_idx_on s2, gpr_idx(SRC0) -; GFX9-NEXT: v_mov_b32_e32 v3, v3 +; GFX9-NEXT: v_mov_b32_e32 v1, v3 ; GFX9-NEXT: s_set_gpr_idx_off -; GFX9-NEXT: v_readfirstlane_b32 s2, v18 -; GFX9-NEXT: v_readfirstlane_b32 s3, v3 +; GFX9-NEXT: v_readfirstlane_b32 s2, v0 +; GFX9-NEXT: v_readfirstlane_b32 s3, v1 ; GFX9-NEXT: ; return to shader part epilog ; ; GFX8-LABEL: extractelement_vgpr_v4i128_sgpr_idx: @@ -160,237 +162,238 @@ ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx4 v[3:6], v[0:1], off ; GFX9-NEXT: global_load_dwordx4 v[7:10], v[0:1], off offset:16 -; GFX9-NEXT: v_lshlrev_b32_e32 v2, 1, v2 -; GFX9-NEXT: v_add_u32_e32 v16, 1, v2 +; GFX9-NEXT: global_load_dwordx4 v[11:14], v[0:1], off offset:32 +; GFX9-NEXT: v_lshlrev_b32_e32 v15, 1, v2 +; GFX9-NEXT: v_add_u32_e32 v16, 1, v15 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v15 +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_cndmask_b32_e32 v17, v3, v5, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v18, v4, v6, vcc ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v16 -; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v2 -; GFX9-NEXT: v_cmp_eq_u32_e64 s[6:7], 6, v2 -; GFX9-NEXT: v_cmp_eq_u32_e64 s[8:9], 7, v2 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_cndmask_b32_e64 v11, v3, v5, s[4:5] -; GFX9-NEXT: v_cndmask_b32_e64 v12, v4, v6, s[4:5] -; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v5, vcc +; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off offset:48 ; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 2, v2 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_cndmask_b32_e32 v5, v11, v7, vcc -; GFX9-NEXT: v_cndmask_b32_e32 v6, v12, v8, vcc +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 2, v15 +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_cndmask_b32_e32 v6, v17, v7, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v17, v18, v8, vcc ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 2, v16 -; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc ; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 3, v2 -; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc -; GFX9-NEXT: v_cndmask_b32_e32 v6, 
v6, v10, vcc +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 3, v15 +; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v9, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v7, v17, v10, vcc ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 3, v16 -; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc ; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc -; GFX9-NEXT: global_load_dwordx4 v[8:11], v[0:1], off offset:32 -; GFX9-NEXT: global_load_dwordx4 v[12:15], v[0:1], off offset:48 -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 4, v2 -; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 7, v16 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 4, v15 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v8, vcc -; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v9, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v11, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v7, v7, v12, vcc ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 4, v16 -; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc -; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 5, v2 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v10, vcc -; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v11, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v12, vcc +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 5, v15 +; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v13, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v7, v7, v14, vcc ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 5, v16 -; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v10, vcc -; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v11, vcc -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 6, v16 +; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v13, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v14, vcc +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 6, v15 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v12, s[6:7] -; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v13, s[6:7] -; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v13, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v14, s[8:9] -; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v15, s[8:9] -; GFX9-NEXT: v_cndmask_b32_e64 v2, v3, v14, s[4:5] -; GFX9-NEXT: v_cndmask_b32_e64 v3, v4, v15, s[4:5] +; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v0, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v7, v7, v1, vcc +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 6, v16 +; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v0, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v1, vcc +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 7, v15 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v3, vcc +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 7, v16 +; GFX9-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc ; GFX9-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: extractelement_vgpr_v4i128_vgpr_idx: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v3, vcc, 16, v0 -; GFX8-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc -; GFX8-NEXT: flat_load_dwordx4 v[4:7], v[3:4] -; GFX8-NEXT: flat_load_dwordx4 v[8:11], v[0:1] -; GFX8-NEXT: v_lshlrev_b32_e32 v16, 1, v2 -; GFX8-NEXT: v_add_u32_e32 v17, vcc, 1, v16 -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v17 -; GFX8-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v16 -; GFX8-NEXT: v_cmp_eq_u32_e64 s[6:7], 6, v16 -; GFX8-NEXT: v_cmp_eq_u32_e64 s[8:9], 7, v16 -; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_cndmask_b32_e64 v2, v8, v10, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e64 v3, v9, v11, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 2, v16 -; GFX8-NEXT: v_cndmask_b32_e32 v2, 
v2, v4, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 2, v17 -; GFX8-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v5, v9, v5, vcc -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 3, v16 -; GFX8-NEXT: v_cndmask_b32_e32 v18, v2, v6, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v19, v3, v7, vcc -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 3, v17 -; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc -; GFX8-NEXT: v_add_u32_e32 v2, vcc, 32, v0 -; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc +; GFX8-NEXT: flat_load_dwordx4 v[3:6], v[0:1] +; GFX8-NEXT: v_add_u32_e32 v7, vcc, 16, v0 +; GFX8-NEXT: v_addc_u32_e32 v8, vcc, 0, v1, vcc +; GFX8-NEXT: flat_load_dwordx4 v[7:10], v[7:8] +; GFX8-NEXT: v_add_u32_e32 v11, vcc, 32, v0 +; GFX8-NEXT: v_addc_u32_e32 v12, vcc, 0, v1, vcc +; GFX8-NEXT: flat_load_dwordx4 v[11:14], v[11:12] ; GFX8-NEXT: v_add_u32_e32 v0, vcc, 48, v0 +; GFX8-NEXT: v_lshlrev_b32_e32 v15, 1, v2 ; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc -; GFX8-NEXT: flat_load_dwordx4 v[8:11], v[2:3] -; GFX8-NEXT: flat_load_dwordx4 v[12:15], v[0:1] -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 4, v16 -; GFX8-NEXT: v_cmp_eq_u32_e64 s[4:5], 7, v17 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, 1, v15 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v15 +; GFX8-NEXT: s_waitcnt vmcnt(2) +; GFX8-NEXT: v_cndmask_b32_e32 v17, v3, v5, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v18, v4, v6, vcc +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v16 +; GFX8-NEXT: v_cndmask_b32_e32 v5, v3, v5, vcc +; GFX8-NEXT: flat_load_dwordx4 v[0:3], v[0:1] +; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 2, v15 +; GFX8-NEXT: s_waitcnt vmcnt(2) +; GFX8-NEXT: v_cndmask_b32_e32 v6, v17, v7, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v17, v18, v8, vcc +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 2, v16 +; GFX8-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 3, v15 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v6, v9, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v7, v17, v10, vcc +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 3, v16 +; GFX8-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 4, v15 ; GFX8-NEXT: s_waitcnt vmcnt(1) -; GFX8-NEXT: v_cndmask_b32_e32 v0, v18, v8, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v1, v19, v9, vcc -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 4, v17 -; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v8, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v9, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v6, v6, v11, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v7, v7, v12, vcc +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 4, v16 +; GFX8-NEXT: v_cndmask_b32_e32 v5, v5, v11, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v12, vcc +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 5, v15 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v6, v13, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v7, v7, v14, vcc ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 5, v16 -; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v10, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 5, v17 -; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 6, v17 +; GFX8-NEXT: v_cndmask_b32_e32 v5, v5, v13, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v14, vcc +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 6, v15 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v12, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, v12, s[6:7] -; GFX8-NEXT: v_cndmask_b32_e64 v1, 
v1, v13, s[6:7] -; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v13, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, v14, s[8:9] -; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v15, s[8:9] -; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v14, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, v15, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e32 v6, v6, v0, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v7, v7, v1, vcc +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 6, v16 +; GFX8-NEXT: v_cndmask_b32_e32 v5, v5, v0, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v1, vcc +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 7, v15 +; GFX8-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v1, v7, v3, vcc +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 7, v16 +; GFX8-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX7-LABEL: extractelement_vgpr_v4i128_vgpr_idx: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: s_mov_b32 s10, 0 -; GFX7-NEXT: s_mov_b32 s11, 0xf000 -; GFX7-NEXT: s_mov_b64 s[8:9], 0 -; GFX7-NEXT: buffer_load_dwordx4 v[3:6], v[0:1], s[8:11], 0 addr64 -; GFX7-NEXT: buffer_load_dwordx4 v[7:10], v[0:1], s[8:11], 0 addr64 offset:16 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 1, v2 -; GFX7-NEXT: v_add_i32_e32 v16, vcc, 1, v2 +; GFX7-NEXT: s_mov_b32 s6, 0 +; GFX7-NEXT: s_mov_b32 s7, 0xf000 +; GFX7-NEXT: s_mov_b64 s[4:5], 0 +; GFX7-NEXT: buffer_load_dwordx4 v[3:6], v[0:1], s[4:7], 0 addr64 +; GFX7-NEXT: buffer_load_dwordx4 v[7:10], v[0:1], s[4:7], 0 addr64 offset:16 +; GFX7-NEXT: buffer_load_dwordx4 v[11:14], v[0:1], s[4:7], 0 addr64 offset:32 +; GFX7-NEXT: v_lshlrev_b32_e32 v15, 1, v2 +; GFX7-NEXT: v_add_i32_e32 v16, vcc, 1, v15 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 1, v15 +; GFX7-NEXT: s_waitcnt vmcnt(2) +; GFX7-NEXT: v_cndmask_b32_e32 v17, v3, v5, vcc +; GFX7-NEXT: v_cndmask_b32_e32 v18, v4, v6, vcc ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 1, v16 -; GFX7-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v2 -; GFX7-NEXT: v_cmp_eq_u32_e64 s[6:7], 6, v2 -; GFX7-NEXT: s_waitcnt vmcnt(1) -; GFX7-NEXT: v_cndmask_b32_e64 v11, v3, v5, s[4:5] -; GFX7-NEXT: v_cndmask_b32_e64 v12, v4, v6, s[4:5] -; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc +; GFX7-NEXT: v_cndmask_b32_e32 v5, v3, v5, vcc +; GFX7-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[4:7], 0 addr64 offset:48 ; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 2, v2 -; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_cndmask_b32_e32 v5, v11, v7, vcc -; GFX7-NEXT: v_cndmask_b32_e32 v6, v12, v8, vcc +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 2, v15 +; GFX7-NEXT: s_waitcnt vmcnt(2) +; GFX7-NEXT: v_cndmask_b32_e32 v6, v17, v7, vcc +; GFX7-NEXT: v_cndmask_b32_e32 v17, v18, v8, vcc ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 2, v16 -; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc +; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc ; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 3, v2 -; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc -; GFX7-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 3, v15 +; GFX7-NEXT: v_cndmask_b32_e32 v6, v6, v9, vcc +; GFX7-NEXT: v_cndmask_b32_e32 v7, v17, v10, vcc ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 3, v16 -; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc +; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc ; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc -; GFX7-NEXT: buffer_load_dwordx4 v[8:11], v[0:1], s[8:11], 0 addr64 offset:32 -; GFX7-NEXT: buffer_load_dwordx4 v[12:15], v[0:1], s[8:11], 0 addr64 offset:48 -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 4, v2 -; 
GFX7-NEXT: v_cmp_eq_u32_e64 s[4:5], 7, v16 -; GFX7-NEXT: v_cmp_eq_u32_e64 s[8:9], 7, v2 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 4, v15 ; GFX7-NEXT: s_waitcnt vmcnt(1) -; GFX7-NEXT: v_cndmask_b32_e32 v0, v5, v8, vcc -; GFX7-NEXT: v_cndmask_b32_e32 v1, v6, v9, vcc +; GFX7-NEXT: v_cndmask_b32_e32 v6, v6, v11, vcc +; GFX7-NEXT: v_cndmask_b32_e32 v7, v7, v12, vcc ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 4, v16 -; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc -; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 5, v2 -; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v10, vcc -; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc +; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v11, vcc +; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v12, vcc +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 5, v15 +; GFX7-NEXT: v_cndmask_b32_e32 v6, v6, v13, vcc +; GFX7-NEXT: v_cndmask_b32_e32 v7, v7, v14, vcc ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 5, v16 -; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v10, vcc -; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v11, vcc -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 6, v16 +; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v13, vcc +; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v14, vcc +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 6, v15 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v12, vcc -; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, v12, s[6:7] -; GFX7-NEXT: v_cndmask_b32_e64 v1, v1, v13, s[6:7] -; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v13, vcc -; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, v14, s[8:9] -; GFX7-NEXT: v_cndmask_b32_e64 v1, v1, v15, s[8:9] -; GFX7-NEXT: v_cndmask_b32_e64 v2, v3, v14, s[4:5] -; GFX7-NEXT: v_cndmask_b32_e64 v3, v4, v15, s[4:5] +; GFX7-NEXT: v_cndmask_b32_e32 v6, v6, v0, vcc +; GFX7-NEXT: v_cndmask_b32_e32 v7, v7, v1, vcc +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 6, v16 +; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v0, vcc +; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v1, vcc +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 7, v15 +; GFX7-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GFX7-NEXT: v_cndmask_b32_e32 v1, v7, v3, vcc +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 7, v16 +; GFX7-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc +; GFX7-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc ; GFX7-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: extractelement_vgpr_v4i128_vgpr_idx: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: s_clause 0x1 -; GFX10-NEXT: global_load_dwordx4 v[8:11], v[0:1], off -; GFX10-NEXT: global_load_dwordx4 v[4:7], v[0:1], off offset:16 -; GFX10-NEXT: v_lshlrev_b32_e32 v2, 1, v2 -; GFX10-NEXT: v_add_nc_u32_e32 v3, 1, v2 -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v2 -; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 1, v3 -; GFX10-NEXT: s_waitcnt vmcnt(1) -; GFX10-NEXT: v_cndmask_b32_e32 v12, v8, v10, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v13, v9, v11, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v14, v8, v10, s4 -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v2 -; GFX10-NEXT: v_cndmask_b32_e64 v15, v9, v11, s4 -; GFX10-NEXT: global_load_dwordx4 v[8:11], v[0:1], off offset:32 -; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 2, v3 -; GFX10-NEXT: s_waitcnt vmcnt(1) -; GFX10-NEXT: v_cndmask_b32_e32 v12, v12, v4, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v13, v13, v5, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v2 -; GFX10-NEXT: v_cndmask_b32_e64 v4, v14, v4, s4 -; GFX10-NEXT: v_cndmask_b32_e64 v5, v15, v5, s4 -; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 3, v3 -; GFX10-NEXT: v_cndmask_b32_e32 v16, v12, v6, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v17, v13, v7, vcc_lo -; GFX10-NEXT: global_load_dwordx4 v[12:15], 
v[0:1], off offset:48 +; GFX10-NEXT: s_clause 0x3 +; GFX10-NEXT: global_load_dwordx4 v[3:6], v[0:1], off +; GFX10-NEXT: global_load_dwordx4 v[7:10], v[0:1], off offset:16 +; GFX10-NEXT: global_load_dwordx4 v[11:14], v[0:1], off offset:32 +; GFX10-NEXT: global_load_dwordx4 v[15:18], v[0:1], off offset:48 +; GFX10-NEXT: v_lshlrev_b32_e32 v0, 1, v2 +; GFX10-NEXT: v_add_nc_u32_e32 v1, 1, v0 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 +; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 1, v1 +; GFX10-NEXT: s_waitcnt vmcnt(3) +; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v19, v4, v6, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v0 +; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, v5, s4 ; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, v6, s4 -; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v7, s4 -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v2 -; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 4, v3 -; GFX10-NEXT: s_waitcnt vmcnt(1) -; GFX10-NEXT: v_cndmask_b32_e32 v0, v16, v8, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v1, v17, v9, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v2 +; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 2, v1 +; GFX10-NEXT: s_waitcnt vmcnt(2) +; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v5, v19, v8, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 +; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, v7, s4 ; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, v8, s4 -; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v9, s4 -; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 5, v3 -; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v10, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v2 +; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 3, v1 +; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v5, v5, v10, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 +; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, v9, s4 ; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, v10, s4 -; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v11, s4 -; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 6, v3 -; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v12, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v13, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v2 +; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 4, v1 +; GFX10-NEXT: s_waitcnt vmcnt(1) +; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v11, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v5, v5, v12, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0 +; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, v11, s4 ; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, v12, s4 -; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v13, s4 -; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 7, v3 -; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v14, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v15, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v2, v4, v14, s4 -; GFX10-NEXT: v_cndmask_b32_e64 v3, v5, v15, s4 +; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 5, v1 +; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v13, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v5, v5, v14, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v0 +; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, v13, s4 +; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, v14, s4 +; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 6, v1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v15, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v5, v5, v16, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v0 +; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, v15, s4 +; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, v16, s4 +; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 7, v1 +; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v17, vcc_lo +; 
GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v18, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v2, v3, v17, s4 +; GFX10-NEXT: v_cndmask_b32_e64 v3, v4, v18, s4 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-LABEL: extractelement_vgpr_v4i128_vgpr_idx: @@ -398,59 +401,58 @@ ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_clause 0x3 -; GFX11-NEXT: global_load_b128 v[16:19], v[0:1], off -; GFX11-NEXT: global_load_b128 v[4:7], v[0:1], off offset:16 -; GFX11-NEXT: global_load_b128 v[8:11], v[0:1], off offset:32 -; GFX11-NEXT: global_load_b128 v[12:15], v[0:1], off offset:48 +; GFX11-NEXT: global_load_b128 v[3:6], v[0:1], off +; GFX11-NEXT: global_load_b128 v[7:10], v[0:1], off offset:16 +; GFX11-NEXT: global_load_b128 v[11:14], v[0:1], off offset:32 +; GFX11-NEXT: global_load_b128 v[15:18], v[0:1], off offset:48 ; GFX11-NEXT: v_lshlrev_b32_e32 v0, 1, v2 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 ; GFX11-NEXT: s_waitcnt vmcnt(3) -; GFX11-NEXT: v_cndmask_b32_e32 v3, v17, v19, vcc_lo -; GFX11-NEXT: v_dual_cndmask_b32 v2, v16, v18 :: v_dual_add_nc_u32 v1, 1, v0 -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v0 +; GFX11-NEXT: v_dual_cndmask_b32 v2, v3, v5 :: v_dual_add_nc_u32 v1, 1, v0 ; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 1, v1 +; GFX11-NEXT: v_cndmask_b32_e32 v19, v4, v6, vcc_lo +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, v5, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, v6, s0 ; GFX11-NEXT: s_waitcnt vmcnt(2) -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v4 :: v_dual_cndmask_b32 v3, v3, v5 -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 -; GFX11-NEXT: v_cndmask_b32_e64 v16, v16, v18, s0 -; GFX11-NEXT: v_cndmask_b32_e64 v17, v17, v19, s0 +; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo ; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 2, v1 -; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v6 :: v_dual_cndmask_b32 v3, v3, v7 -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX11-NEXT: v_cndmask_b32_e64 v4, v16, v4, s0 -; GFX11-NEXT: v_cndmask_b32_e64 v5, v17, v5, s0 +; GFX11-NEXT: v_cndmask_b32_e32 v5, v19, v8, vcc_lo +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3) +; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, v7, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, v8, s0 ; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 3, v1 +; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v9 :: v_dual_cndmask_b32 v5, v5, v10 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, v9, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, v10, s0 +; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 4, v1 ; GFX11-NEXT: s_waitcnt vmcnt(1) -; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v8 :: v_dual_cndmask_b32 v3, v3, v9 +; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v11 :: v_dual_cndmask_b32 v5, v5, v12 ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3) -; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, v6, s0 -; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, v7, s0 -; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 4, v1 -; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v10 :: v_dual_cndmask_b32 v3, v3, v11 -; GFX11-NEXT: 
v_cmp_eq_u32_e32 vcc_lo, 6, v0 -; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, v8, s0 -; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, v9, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, v11, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, v12, s0 ; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 5, v1 +; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v13 :: v_dual_cndmask_b32 v5, v5, v14 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, v13, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, v14, s0 +; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 6, v1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v12 :: v_dual_cndmask_b32 v3, v3, v13 +; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v15 :: v_dual_cndmask_b32 v5, v5, v16 ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, v10, s0 -; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, v11, s0 -; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 6, v1 -; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v14, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, v12, s0 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, v13, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, v15, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, v16, s0 ; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 7, v1 -; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v15, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v2, v4, v14, s0 +; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v17 :: v_dual_cndmask_b32 v1, v5, v18 +; GFX11-NEXT: v_cndmask_b32_e64 v2, v3, v17, s0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-NEXT: v_cndmask_b32_e64 v3, v5, v15, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v3, v4, v18, s0 ; GFX11-NEXT: s_setpc_b64 s[30:31] %vector = load <4 x i128>, ptr addrspace(1) %ptr %element = extractelement <4 x i128> %vector, i32 %idx @@ -463,7 +465,6 @@ ; GFX9-NEXT: s_load_dwordx16 s[0:15], s[2:3], 0x0 ; GFX9-NEXT: v_lshlrev_b32_e32 v0, 1, v0 ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0 -; GFX9-NEXT: v_add_u32_e32 v19, 1, v0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v1, s0 ; GFX9-NEXT: v_mov_b32_e32 v2, s1 @@ -494,38 +495,39 @@ ; GFX9-NEXT: v_cndmask_b32_e32 v17, v17, v11, vcc ; GFX9-NEXT: v_cndmask_b32_e32 v18, v18, v12, vcc ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 6, v0 +; GFX9-NEXT: v_mov_b32_e32 v15, s14 +; GFX9-NEXT: v_mov_b32_e32 v16, s15 ; GFX9-NEXT: v_cndmask_b32_e32 v17, v17, v13, vcc ; GFX9-NEXT: v_cndmask_b32_e32 v18, v18, v14, vcc -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v19 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 7, v0 +; GFX9-NEXT: v_add_u32_e32 v0, 1, v0 +; GFX9-NEXT: v_cndmask_b32_e32 v17, v17, v15, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v18, v18, v16, vcc +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0 ; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc ; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc -; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v19 -; GFX9-NEXT: v_mov_b32_e32 v15, s14 -; GFX9-NEXT: v_mov_b32_e32 v16, s15 -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 7, v0 -; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v5, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v6, s[0:1] -; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 3, v19 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v17, v15, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v3, v1, v7, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e32 v1, v18, v16, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v8, s[0:1] -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 4, v19 -; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 2, v0 +; GFX9-NEXT: 
v_cndmask_b32_e32 v1, v1, v5, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 3, v0 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 4, v0 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc ; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 5, v19 -; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 5, v0 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc ; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v12, vcc -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 6, v19 -; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v13, vcc -; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v14, vcc -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 7, v19 -; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v15, vcc -; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v16, vcc -; GFX9-NEXT: v_readfirstlane_b32 s0, v0 -; GFX9-NEXT: v_readfirstlane_b32 s1, v1 -; GFX9-NEXT: v_readfirstlane_b32 s2, v2 -; GFX9-NEXT: v_readfirstlane_b32 s3, v3 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 6, v0 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v13, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v14, vcc +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 7, v0 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v15, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v16, vcc +; GFX9-NEXT: v_readfirstlane_b32 s0, v17 +; GFX9-NEXT: v_readfirstlane_b32 s1, v18 +; GFX9-NEXT: v_readfirstlane_b32 s2, v0 +; GFX9-NEXT: v_readfirstlane_b32 s3, v1 ; GFX9-NEXT: ; return to shader part epilog ; ; GFX8-LABEL: extractelement_sgpr_v4i128_vgpr_idx: @@ -563,39 +565,39 @@ ; GFX8-NEXT: v_cndmask_b32_e32 v17, v17, v11, vcc ; GFX8-NEXT: v_cndmask_b32_e32 v18, v18, v12, vcc ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 6, v0 +; GFX8-NEXT: v_mov_b32_e32 v15, s14 +; GFX8-NEXT: v_mov_b32_e32 v16, s15 ; GFX8-NEXT: v_cndmask_b32_e32 v17, v17, v13, vcc ; GFX8-NEXT: v_cndmask_b32_e32 v18, v18, v14, vcc -; GFX8-NEXT: v_add_u32_e32 v19, vcc, 1, v0 -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v19 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 7, v0 +; GFX8-NEXT: v_cndmask_b32_e32 v17, v17, v15, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v18, v18, v16, vcc +; GFX8-NEXT: v_add_u32_e32 v0, vcc, 1, v0 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0 ; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc ; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc -; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v19 -; GFX8-NEXT: v_mov_b32_e32 v15, s14 -; GFX8-NEXT: v_mov_b32_e32 v16, s15 -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 7, v0 -; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v5, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v6, s[0:1] -; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], 3, v19 -; GFX8-NEXT: v_cndmask_b32_e32 v0, v17, v15, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v3, v1, v7, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e32 v1, v18, v16, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v8, s[0:1] -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 4, v19 -; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 2, v0 +; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 3, v0 +; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 4, v0 +; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc ; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 5, v19 -; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 5, v0 +; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc ; GFX8-NEXT: 
v_cndmask_b32_e32 v2, v2, v12, vcc -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 6, v19 -; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v13, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v4, v2, v14, vcc -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 7, v19 -; GFX8-NEXT: v_cndmask_b32_e32 v2, v3, v15, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v16, vcc -; GFX8-NEXT: v_readfirstlane_b32 s0, v0 -; GFX8-NEXT: v_readfirstlane_b32 s1, v1 -; GFX8-NEXT: v_readfirstlane_b32 s2, v2 -; GFX8-NEXT: v_readfirstlane_b32 s3, v3 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 6, v0 +; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v13, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v14, vcc +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 7, v0 +; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v15, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v16, vcc +; GFX8-NEXT: v_readfirstlane_b32 s0, v17 +; GFX8-NEXT: v_readfirstlane_b32 s1, v18 +; GFX8-NEXT: v_readfirstlane_b32 s2, v0 +; GFX8-NEXT: v_readfirstlane_b32 s3, v1 ; GFX8-NEXT: ; return to shader part epilog ; ; GFX7-LABEL: extractelement_sgpr_v4i128_vgpr_idx: @@ -633,39 +635,39 @@ ; GFX7-NEXT: v_cndmask_b32_e32 v17, v17, v11, vcc ; GFX7-NEXT: v_cndmask_b32_e32 v18, v18, v12, vcc ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 6, v0 +; GFX7-NEXT: v_mov_b32_e32 v15, s14 +; GFX7-NEXT: v_mov_b32_e32 v16, s15 ; GFX7-NEXT: v_cndmask_b32_e32 v17, v17, v13, vcc ; GFX7-NEXT: v_cndmask_b32_e32 v18, v18, v14, vcc -; GFX7-NEXT: v_add_i32_e32 v19, vcc, 1, v0 -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 1, v19 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 7, v0 +; GFX7-NEXT: v_cndmask_b32_e32 v17, v17, v15, vcc +; GFX7-NEXT: v_cndmask_b32_e32 v18, v18, v16, vcc +; GFX7-NEXT: v_add_i32_e32 v0, vcc, 1, v0 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0 ; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc ; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc -; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v19 -; GFX7-NEXT: v_mov_b32_e32 v15, s14 -; GFX7-NEXT: v_mov_b32_e32 v16, s15 -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 7, v0 -; GFX7-NEXT: v_cndmask_b32_e64 v1, v1, v5, s[0:1] -; GFX7-NEXT: v_cndmask_b32_e64 v2, v2, v6, s[0:1] -; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], 3, v19 -; GFX7-NEXT: v_cndmask_b32_e32 v0, v17, v15, vcc -; GFX7-NEXT: v_cndmask_b32_e64 v3, v1, v7, s[0:1] -; GFX7-NEXT: v_cndmask_b32_e32 v1, v18, v16, vcc -; GFX7-NEXT: v_cndmask_b32_e64 v2, v2, v8, s[0:1] -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 4, v19 -; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 2, v0 +; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc +; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 3, v0 +; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc +; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 4, v0 +; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc ; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 5, v19 -; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 5, v0 +; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc ; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v12, vcc -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 6, v19 -; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v13, vcc -; GFX7-NEXT: v_cndmask_b32_e32 v4, v2, v14, vcc -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 7, v19 -; GFX7-NEXT: v_cndmask_b32_e32 v2, v3, v15, vcc -; GFX7-NEXT: v_cndmask_b32_e32 v3, v4, v16, vcc -; GFX7-NEXT: v_readfirstlane_b32 s0, v0 -; GFX7-NEXT: v_readfirstlane_b32 s1, v1 -; GFX7-NEXT: v_readfirstlane_b32 s2, v2 -; GFX7-NEXT: v_readfirstlane_b32 s3, v3 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 6, v0 +; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v13, 
vcc +; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v14, vcc +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 7, v0 +; GFX7-NEXT: v_cndmask_b32_e32 v0, v1, v15, vcc +; GFX7-NEXT: v_cndmask_b32_e32 v1, v2, v16, vcc +; GFX7-NEXT: v_readfirstlane_b32 s0, v17 +; GFX7-NEXT: v_readfirstlane_b32 s1, v18 +; GFX7-NEXT: v_readfirstlane_b32 s2, v0 +; GFX7-NEXT: v_readfirstlane_b32 s3, v1 ; GFX7-NEXT: ; return to shader part epilog ; ; GFX10-LABEL: extractelement_sgpr_v4i128_vgpr_idx: @@ -712,16 +714,16 @@ ; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, s17, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v0 ; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s16, s0 -; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s17, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v0, v3, s17, s0 ; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 7, v1 -; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, s18, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, s19, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v1, v4, s18, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v3, v5, s19, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s18, s0 -; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s19, s0 -; GFX10-NEXT: v_readfirstlane_b32 s0, v0 -; GFX10-NEXT: v_readfirstlane_b32 s1, v1 +; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s19, s0 +; GFX10-NEXT: v_readfirstlane_b32 s0, v1 +; GFX10-NEXT: v_readfirstlane_b32 s1, v3 ; GFX10-NEXT: v_readfirstlane_b32 s2, v2 -; GFX10-NEXT: v_readfirstlane_b32 s3, v3 +; GFX10-NEXT: v_readfirstlane_b32 s3, v0 ; GFX10-NEXT: ; return to shader part epilog ; ; GFX11-LABEL: extractelement_sgpr_v4i128_vgpr_idx: @@ -773,18 +775,18 @@ ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3) ; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s16, s0 -; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, s17, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, v3, s17, s0 ; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 7, v1 -; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, s18, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, s19, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v1, v4, s18, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v3, v5, s19, vcc_lo ; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s18, s0 -; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, s19, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s19, s0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11-NEXT: v_readfirstlane_b32 s0, v0 -; GFX11-NEXT: v_readfirstlane_b32 s1, v1 +; GFX11-NEXT: v_readfirstlane_b32 s0, v1 +; GFX11-NEXT: v_readfirstlane_b32 s1, v3 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) ; GFX11-NEXT: v_readfirstlane_b32 s2, v2 -; GFX11-NEXT: v_readfirstlane_b32 s3, v3 +; GFX11-NEXT: v_readfirstlane_b32 s3, v0 ; GFX11-NEXT: ; return to shader part epilog %vector = load <4 x i128>, ptr addrspace(4) %ptr %element = extractelement <4 x i128> %vector, i32 %idx diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll @@ -110,17 +110,13 @@ define amdgpu_ps float @dyn_extract_v8f32_s_v(<8 x float> inreg %vec, i32 %sel) { ; GCN-LABEL: dyn_extract_v8f32_s_v: ; GCN: ; %bb.0: ; %entry -; GCN-NEXT: s_mov_b32 s0, s2 -; GCN-NEXT: s_mov_b32 s1, s3 -; GCN-NEXT: s_mov_b32 s2, s4 -; GCN-NEXT: v_mov_b32_e32 v1, s0 -; GCN-NEXT: v_mov_b32_e32 v2, s1 +; GCN-NEXT: v_mov_b32_e32 v1, s2 +; GCN-NEXT: v_mov_b32_e32 v2, s3 ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0 -; GCN-NEXT: s_mov_b32 s3, s5 -; GCN-NEXT: v_mov_b32_e32 
v3, s2 +; GCN-NEXT: v_mov_b32_e32 v3, s4 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 2, v0 -; GCN-NEXT: v_mov_b32_e32 v4, s3 +; GCN-NEXT: v_mov_b32_e32 v4, s5 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 3, v0 ; GCN-NEXT: v_mov_b32_e32 v5, s6 @@ -140,29 +136,21 @@ ; ; GFX10PLUS-LABEL: dyn_extract_v8f32_s_v: ; GFX10PLUS: ; %bb.0: ; %entry -; GFX10PLUS-NEXT: s_mov_b32 s1, s3 +; GFX10PLUS-NEXT: v_mov_b32_e32 v1, s3 ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX10PLUS-NEXT: v_mov_b32_e32 v1, s1 -; GFX10PLUS-NEXT: s_mov_b32 s0, s2 -; GFX10PLUS-NEXT: s_mov_b32 s2, s4 -; GFX10PLUS-NEXT: s_mov_b32 s3, s5 -; GFX10PLUS-NEXT: s_mov_b32 s4, s6 -; GFX10PLUS-NEXT: v_cndmask_b32_e32 v1, s0, v1, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e32 v1, s2, v1, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v0 -; GFX10PLUS-NEXT: s_mov_b32 s5, s7 -; GFX10PLUS-NEXT: s_mov_b32 s6, s8 -; GFX10PLUS-NEXT: s_mov_b32 s7, s9 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s2, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s4, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s3, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s5, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s4, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s6, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s5, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s7, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s6, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s8, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v0, v1, s7, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v0, v1, s9, vcc_lo ; GFX10PLUS-NEXT: ; return to shader part epilog entry: %ext = extractelement <8 x float> %vec, i32 %sel @@ -306,46 +294,46 @@ ; GCN-LABEL: dyn_extract_v8i64_const_s_v: ; GCN: ; %bb.0: ; %entry ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: s_mov_b64 s[4:5], 1 -; GCN-NEXT: s_mov_b64 s[6:7], 2 -; GCN-NEXT: v_mov_b32_e32 v1, s4 -; GCN-NEXT: v_mov_b32_e32 v2, s5 -; GCN-NEXT: v_mov_b32_e32 v3, s6 -; GCN-NEXT: v_mov_b32_e32 v4, s7 -; GCN-NEXT: s_mov_b64 s[8:9], 3 +; GCN-NEXT: s_mov_b64 s[16:17], 2 +; GCN-NEXT: s_mov_b64 s[18:19], 1 +; GCN-NEXT: s_mov_b64 s[14:15], 3 +; GCN-NEXT: v_mov_b32_e32 v1, s18 +; GCN-NEXT: v_mov_b32_e32 v2, s19 +; GCN-NEXT: v_mov_b32_e32 v3, s16 +; GCN-NEXT: v_mov_b32_e32 v4, s17 ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0 -; GCN-NEXT: v_mov_b32_e32 v5, s8 -; GCN-NEXT: v_mov_b32_e32 v6, s9 -; GCN-NEXT: s_mov_b64 s[10:11], 4 +; GCN-NEXT: s_mov_b64 s[12:13], 4 +; GCN-NEXT: v_mov_b32_e32 v5, s14 +; GCN-NEXT: v_mov_b32_e32 v6, s15 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc ; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 2, v0 -; GCN-NEXT: v_mov_b32_e32 v7, s10 -; GCN-NEXT: v_mov_b32_e32 v8, s11 -; GCN-NEXT: s_mov_b64 s[12:13], 5 +; GCN-NEXT: s_mov_b64 s[10:11], 5 +; GCN-NEXT: v_mov_b32_e32 v7, s12 +; GCN-NEXT: v_mov_b32_e32 v8, s13 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc ; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 3, v0 -; GCN-NEXT: s_mov_b64 s[14:15], 6 -; GCN-NEXT: v_mov_b32_e32 v9, s12 -; GCN-NEXT: v_mov_b32_e32 v10, s13 +; GCN-NEXT: s_mov_b64 s[8:9], 6 +; GCN-NEXT: v_mov_b32_e32 v9, s10 +; GCN-NEXT: v_mov_b32_e32 
v10, s11 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc ; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 4, v0 -; GCN-NEXT: s_mov_b64 s[16:17], 7 -; GCN-NEXT: v_mov_b32_e32 v11, s14 -; GCN-NEXT: v_mov_b32_e32 v12, s15 +; GCN-NEXT: s_mov_b64 s[6:7], 7 +; GCN-NEXT: v_mov_b32_e32 v11, s8 +; GCN-NEXT: v_mov_b32_e32 v12, s9 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc ; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 5, v0 -; GCN-NEXT: s_mov_b64 s[18:19], 8 -; GCN-NEXT: v_mov_b32_e32 v13, s16 -; GCN-NEXT: v_mov_b32_e32 v14, s17 +; GCN-NEXT: s_mov_b64 s[4:5], 8 +; GCN-NEXT: v_mov_b32_e32 v13, s6 +; GCN-NEXT: v_mov_b32_e32 v14, s7 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc ; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v12, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 6, v0 -; GCN-NEXT: v_mov_b32_e32 v15, s18 -; GCN-NEXT: v_mov_b32_e32 v16, s19 +; GCN-NEXT: v_mov_b32_e32 v15, s4 +; GCN-NEXT: v_mov_b32_e32 v16, s5 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v13, vcc ; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v14, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 7, v0 @@ -357,73 +345,73 @@ ; GFX10: ; %bb.0: ; %entry ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: s_mov_b64 s[6:7], 2 +; GFX10-NEXT: s_mov_b64 s[4:5], 2 ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX10-NEXT: v_mov_b32_e32 v1, s6 -; GFX10-NEXT: v_mov_b32_e32 v2, s7 -; GFX10-NEXT: s_mov_b64 s[4:5], 1 -; GFX10-NEXT: s_mov_b64 s[8:9], 3 -; GFX10-NEXT: s_mov_b64 s[10:11], 4 -; GFX10-NEXT: v_cndmask_b32_e32 v1, s4, v1, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v2, s5, v2, vcc_lo +; GFX10-NEXT: v_mov_b32_e32 v1, s4 +; GFX10-NEXT: v_mov_b32_e32 v2, s5 +; GFX10-NEXT: s_mov_b64 s[6:7], 1 +; GFX10-NEXT: s_mov_b64 s[4:5], 3 +; GFX10-NEXT: v_cndmask_b32_e32 v1, s6, v1, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v2, s7, v2, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v0 -; GFX10-NEXT: s_mov_b64 s[12:13], 5 -; GFX10-NEXT: s_mov_b64 s[14:15], 6 -; GFX10-NEXT: s_mov_b64 s[16:17], 7 -; GFX10-NEXT: s_mov_b64 s[18:19], 8 -; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s8, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s9, vcc_lo +; GFX10-NEXT: s_mov_b64 s[6:7], 4 +; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s4, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s5, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 -; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s10, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s11, vcc_lo +; GFX10-NEXT: s_mov_b64 s[4:5], 5 +; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s6, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s7, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 -; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s12, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s13, vcc_lo +; GFX10-NEXT: s_mov_b64 s[6:7], 6 +; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s4, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s5, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0 -; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s14, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s15, vcc_lo +; GFX10-NEXT: s_mov_b64 s[4:5], 7 +; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s6, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s7, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v0 -; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s16, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s17, vcc_lo +; GFX10-NEXT: s_mov_b64 s[6:7], 8 +; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s4, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s5, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v0 -; GFX10-NEXT: 
v_cndmask_b32_e64 v0, v1, s18, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v1, v2, s19, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v0, v1, s6, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v1, v2, s7, vcc_lo ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-LABEL: dyn_extract_v8i64_const_s_v: ; GFX11: ; %bb.0: ; %entry ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-NEXT: s_mov_b64 s[2:3], 2 +; GFX11-NEXT: s_mov_b64 s[0:1], 2 ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX11-NEXT: v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s3 -; GFX11-NEXT: s_mov_b64 s[0:1], 1 -; GFX11-NEXT: s_mov_b64 s[4:5], 3 -; GFX11-NEXT: s_mov_b64 s[6:7], 4 -; GFX11-NEXT: v_cndmask_b32_e32 v1, s0, v1, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e32 v2, s1, v2, vcc_lo +; GFX11-NEXT: v_dual_mov_b32 v1, s0 :: v_dual_mov_b32 v2, s1 +; GFX11-NEXT: s_mov_b64 s[2:3], 1 +; GFX11-NEXT: s_mov_b64 s[0:1], 3 +; GFX11-NEXT: v_cndmask_b32_e32 v1, s2, v1, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e32 v2, s3, v2, vcc_lo ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v0 -; GFX11-NEXT: s_mov_b64 s[8:9], 5 -; GFX11-NEXT: s_mov_b64 s[10:11], 6 -; GFX11-NEXT: s_mov_b64 s[12:13], 7 -; GFX11-NEXT: s_mov_b64 s[14:15], 8 -; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s4, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s5, vcc_lo +; GFX11-NEXT: s_mov_b64 s[2:3], 4 +; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s0, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s1, vcc_lo ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 -; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s6, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s7, vcc_lo +; GFX11-NEXT: s_mov_b64 s[0:1], 5 +; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s2, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s3, vcc_lo ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 -; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s8, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s9, vcc_lo +; GFX11-NEXT: s_mov_b64 s[2:3], 6 +; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s0, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s1, vcc_lo ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0 -; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s10, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s11, vcc_lo +; GFX11-NEXT: s_mov_b64 s[0:1], 7 +; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s2, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s3, vcc_lo ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v0 -; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s12, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s13, vcc_lo +; GFX11-NEXT: s_mov_b64 s[2:3], 8 +; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s0, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s1, vcc_lo ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v0 -; GFX11-NEXT: v_cndmask_b32_e64 v0, v1, s14, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v1, v2, s15, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v0, v1, s2, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v1, v2, s3, vcc_lo ; GFX11-NEXT: s_setpc_b64 s[30:31] entry: %ext = extractelement <8 x i64> , i32 %sel @@ -507,40 +495,28 @@ define amdgpu_ps void @dyn_extract_v8i64_s_v(<8 x i64> inreg %vec, i32 %sel) { ; GPRIDX-LABEL: dyn_extract_v8i64_s_v: ; GPRIDX: ; %bb.0: ; %entry -; GPRIDX-NEXT: s_mov_b32 s0, s2 -; GPRIDX-NEXT: s_mov_b32 s1, s3 -; GPRIDX-NEXT: s_mov_b32 s2, s4 -; GPRIDX-NEXT: s_mov_b32 s3, s5 -; GPRIDX-NEXT: s_mov_b32 s4, s6 -; GPRIDX-NEXT: s_mov_b32 s5, s7 -; GPRIDX-NEXT: v_mov_b32_e32 v1, s0 -; GPRIDX-NEXT: v_mov_b32_e32 v2, s1 -; GPRIDX-NEXT: v_mov_b32_e32 v3, s2 -; GPRIDX-NEXT: v_mov_b32_e32 v4, s3 +; GPRIDX-NEXT: v_mov_b32_e32 v1, s2 +; GPRIDX-NEXT: v_mov_b32_e32 v2, s3 +; GPRIDX-NEXT: 
v_mov_b32_e32 v3, s4 +; GPRIDX-NEXT: v_mov_b32_e32 v4, s5 ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0 -; GPRIDX-NEXT: s_mov_b32 s6, s8 -; GPRIDX-NEXT: s_mov_b32 s7, s9 -; GPRIDX-NEXT: v_mov_b32_e32 v5, s4 -; GPRIDX-NEXT: v_mov_b32_e32 v6, s5 +; GPRIDX-NEXT: v_mov_b32_e32 v5, s6 +; GPRIDX-NEXT: v_mov_b32_e32 v6, s7 ; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc ; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v0 -; GPRIDX-NEXT: s_mov_b32 s8, s10 -; GPRIDX-NEXT: s_mov_b32 s9, s11 -; GPRIDX-NEXT: v_mov_b32_e32 v7, s6 -; GPRIDX-NEXT: v_mov_b32_e32 v8, s7 +; GPRIDX-NEXT: v_mov_b32_e32 v7, s8 +; GPRIDX-NEXT: v_mov_b32_e32 v8, s9 ; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc ; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v0 -; GPRIDX-NEXT: s_mov_b32 s10, s12 -; GPRIDX-NEXT: s_mov_b32 s11, s13 -; GPRIDX-NEXT: v_mov_b32_e32 v9, s8 -; GPRIDX-NEXT: v_mov_b32_e32 v10, s9 +; GPRIDX-NEXT: v_mov_b32_e32 v9, s10 +; GPRIDX-NEXT: v_mov_b32_e32 v10, s11 ; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc ; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v0 -; GPRIDX-NEXT: v_mov_b32_e32 v11, s10 -; GPRIDX-NEXT: v_mov_b32_e32 v12, s11 +; GPRIDX-NEXT: v_mov_b32_e32 v11, s12 +; GPRIDX-NEXT: v_mov_b32_e32 v12, s13 ; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc ; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v0 @@ -561,40 +537,28 @@ ; ; MOVREL-LABEL: dyn_extract_v8i64_s_v: ; MOVREL: ; %bb.0: ; %entry -; MOVREL-NEXT: s_mov_b32 s0, s2 -; MOVREL-NEXT: s_mov_b32 s1, s3 -; MOVREL-NEXT: s_mov_b32 s2, s4 -; MOVREL-NEXT: s_mov_b32 s3, s5 -; MOVREL-NEXT: s_mov_b32 s4, s6 -; MOVREL-NEXT: s_mov_b32 s5, s7 -; MOVREL-NEXT: v_mov_b32_e32 v1, s0 -; MOVREL-NEXT: v_mov_b32_e32 v2, s1 -; MOVREL-NEXT: v_mov_b32_e32 v3, s2 -; MOVREL-NEXT: v_mov_b32_e32 v4, s3 +; MOVREL-NEXT: v_mov_b32_e32 v1, s2 +; MOVREL-NEXT: v_mov_b32_e32 v2, s3 +; MOVREL-NEXT: v_mov_b32_e32 v3, s4 +; MOVREL-NEXT: v_mov_b32_e32 v4, s5 ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0 -; MOVREL-NEXT: s_mov_b32 s6, s8 -; MOVREL-NEXT: s_mov_b32 s7, s9 -; MOVREL-NEXT: v_mov_b32_e32 v5, s4 -; MOVREL-NEXT: v_mov_b32_e32 v6, s5 +; MOVREL-NEXT: v_mov_b32_e32 v5, s6 +; MOVREL-NEXT: v_mov_b32_e32 v6, s7 ; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc ; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc, 2, v0 -; MOVREL-NEXT: s_mov_b32 s8, s10 -; MOVREL-NEXT: s_mov_b32 s9, s11 -; MOVREL-NEXT: v_mov_b32_e32 v7, s6 -; MOVREL-NEXT: v_mov_b32_e32 v8, s7 +; MOVREL-NEXT: v_mov_b32_e32 v7, s8 +; MOVREL-NEXT: v_mov_b32_e32 v8, s9 ; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc ; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc, 3, v0 -; MOVREL-NEXT: s_mov_b32 s10, s12 -; MOVREL-NEXT: s_mov_b32 s11, s13 -; MOVREL-NEXT: v_mov_b32_e32 v9, s8 -; MOVREL-NEXT: v_mov_b32_e32 v10, s9 +; MOVREL-NEXT: v_mov_b32_e32 v9, s10 +; MOVREL-NEXT: v_mov_b32_e32 v10, s11 ; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc ; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc, 4, v0 -; MOVREL-NEXT: v_mov_b32_e32 v11, s10 -; MOVREL-NEXT: v_mov_b32_e32 v12, s11 +; MOVREL-NEXT: v_mov_b32_e32 v11, s12 +; MOVREL-NEXT: v_mov_b32_e32 v12, s13 ; MOVREL-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc ; MOVREL-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc ; MOVREL-NEXT: v_cmp_eq_u32_e32 vcc, 5, v0 @@ -615,88 +579,56 @@ ; ; GFX10-LABEL: dyn_extract_v8i64_s_v: ; GFX10: ; %bb.0: 
; %entry -; GFX10-NEXT: s_mov_b32 s0, s2 -; GFX10-NEXT: s_mov_b32 s2, s4 -; GFX10-NEXT: s_mov_b32 s19, s5 -; GFX10-NEXT: v_mov_b32_e32 v1, s2 -; GFX10-NEXT: v_mov_b32_e32 v2, s19 +; GFX10-NEXT: v_mov_b32_e32 v1, s4 +; GFX10-NEXT: v_mov_b32_e32 v2, s5 ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX10-NEXT: s_mov_b32 s1, s3 -; GFX10-NEXT: s_mov_b32 s4, s6 -; GFX10-NEXT: s_mov_b32 s5, s7 -; GFX10-NEXT: s_mov_b32 s6, s8 -; GFX10-NEXT: v_cndmask_b32_e32 v1, s0, v1, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v2, s1, v2, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v1, s2, v1, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v2, s3, v2, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v0 -; GFX10-NEXT: s_mov_b32 s7, s9 -; GFX10-NEXT: s_mov_b32 s8, s10 -; GFX10-NEXT: s_mov_b32 s9, s11 -; GFX10-NEXT: s_mov_b32 s10, s12 -; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s4, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s5, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 -; GFX10-NEXT: s_mov_b32 s11, s13 -; GFX10-NEXT: s_mov_b32 s12, s14 -; GFX10-NEXT: s_mov_b32 s13, s15 -; GFX10-NEXT: s_mov_b32 s14, s16 ; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s6, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s7, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 -; GFX10-NEXT: s_mov_b32 s15, s17 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 ; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s8, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s9, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 ; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s10, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s11, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v0 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0 ; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s12, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s13, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v0 +; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s14, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s15, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v0 -; GFX10-NEXT: v_cndmask_b32_e64 v0, v1, s14, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v1, v2, s15, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v0, v1, s16, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v1, v2, s17, vcc_lo ; GFX10-NEXT: global_store_dwordx2 v[0:1], v[0:1], off ; GFX10-NEXT: s_endpgm ; ; GFX11-LABEL: dyn_extract_v8i64_s_v: ; GFX11: ; %bb.0: ; %entry -; GFX11-NEXT: s_mov_b32 s0, s2 -; GFX11-NEXT: s_mov_b32 s2, s4 -; GFX11-NEXT: s_mov_b32 s19, s5 -; GFX11-NEXT: v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s19 +; GFX11-NEXT: v_dual_mov_b32 v1, s4 :: v_dual_mov_b32 v2, s5 ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX11-NEXT: s_mov_b32 s1, s3 -; GFX11-NEXT: s_mov_b32 s4, s6 -; GFX11-NEXT: s_mov_b32 s5, s7 -; GFX11-NEXT: s_mov_b32 s6, s8 -; GFX11-NEXT: v_cndmask_b32_e32 v1, s0, v1, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e32 v2, s1, v2, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e32 v1, s2, v1, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e32 v2, s3, v2, vcc_lo ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v0 -; GFX11-NEXT: s_mov_b32 s7, s9 -; GFX11-NEXT: s_mov_b32 s8, s10 -; GFX11-NEXT: s_mov_b32 s9, s11 -; GFX11-NEXT: s_mov_b32 s10, s12 -; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s4, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s5, vcc_lo -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 -; GFX11-NEXT: s_mov_b32 s11, s13 -; GFX11-NEXT: s_mov_b32 s12, s14 -; GFX11-NEXT: s_mov_b32 s13, s15 -; GFX11-NEXT: s_mov_b32 s14, s16 ; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s6, vcc_lo ; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s7, vcc_lo -; GFX11-NEXT: 
v_cmp_eq_u32_e32 vcc_lo, 4, v0 -; GFX11-NEXT: s_mov_b32 s15, s17 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 ; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s8, vcc_lo ; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s9, vcc_lo -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 ; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s10, vcc_lo ; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s11, vcc_lo -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v0 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0 ; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s12, vcc_lo ; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s13, vcc_lo +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s14, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s15, vcc_lo ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v0 -; GFX11-NEXT: v_cndmask_b32_e64 v0, v1, s14, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v1, v2, s15, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v0, v1, s16, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v1, v2, s17, vcc_lo ; GFX11-NEXT: global_store_b64 v[0:1], v[0:1], off ; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX11-NEXT: s_endpgm @@ -2183,10 +2115,8 @@ define amdgpu_ps float @dyn_extract_v6f32_s_v(<6 x float> inreg %vec, i32 %sel) { ; GCN-LABEL: dyn_extract_v6f32_s_v: ; GCN: ; %bb.0: ; %entry -; GCN-NEXT: s_mov_b32 s0, s2 -; GCN-NEXT: s_mov_b32 s1, s3 -; GCN-NEXT: v_mov_b32_e32 v1, s0 -; GCN-NEXT: v_mov_b32_e32 v2, s1 +; GCN-NEXT: v_mov_b32_e32 v1, s2 +; GCN-NEXT: v_mov_b32_e32 v2, s3 ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0 ; GCN-NEXT: v_mov_b32_e32 v3, s4 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc @@ -2205,23 +2135,17 @@ ; ; GFX10PLUS-LABEL: dyn_extract_v6f32_s_v: ; GFX10PLUS: ; %bb.0: ; %entry -; GFX10PLUS-NEXT: s_mov_b32 s1, s3 +; GFX10PLUS-NEXT: v_mov_b32_e32 v1, s3 ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX10PLUS-NEXT: v_mov_b32_e32 v1, s1 -; GFX10PLUS-NEXT: s_mov_b32 s0, s2 -; GFX10PLUS-NEXT: s_mov_b32 s2, s4 -; GFX10PLUS-NEXT: s_mov_b32 s3, s5 -; GFX10PLUS-NEXT: s_mov_b32 s4, s6 -; GFX10PLUS-NEXT: v_cndmask_b32_e32 v1, s0, v1, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e32 v1, s2, v1, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v0 -; GFX10PLUS-NEXT: s_mov_b32 s5, s7 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s2, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s4, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s3, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s5, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s4, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s6, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v0, v1, s5, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v0, v1, s7, vcc_lo ; GFX10PLUS-NEXT: ; return to shader part epilog entry: %ext = extractelement <6 x float> %vec, i32 %sel @@ -2335,13 +2259,10 @@ define amdgpu_ps float @dyn_extract_v7f32_s_v(<7 x float> inreg %vec, i32 %sel) { ; GCN-LABEL: dyn_extract_v7f32_s_v: ; GCN: ; %bb.0: ; %entry -; GCN-NEXT: s_mov_b32 s0, s2 -; GCN-NEXT: s_mov_b32 s1, s3 -; GCN-NEXT: s_mov_b32 s2, s4 -; GCN-NEXT: v_mov_b32_e32 v1, s0 -; GCN-NEXT: v_mov_b32_e32 v2, s1 +; GCN-NEXT: v_mov_b32_e32 v1, s2 +; GCN-NEXT: v_mov_b32_e32 v2, s3 ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0 -; GCN-NEXT: v_mov_b32_e32 v3, s2 +; GCN-NEXT: v_mov_b32_e32 v3, s4 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 2, v0 ; GCN-NEXT: v_mov_b32_e32 v4, s5 @@ 
-2361,26 +2282,19 @@ ; ; GFX10PLUS-LABEL: dyn_extract_v7f32_s_v: ; GFX10PLUS: ; %bb.0: ; %entry -; GFX10PLUS-NEXT: s_mov_b32 s1, s3 +; GFX10PLUS-NEXT: v_mov_b32_e32 v1, s3 ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX10PLUS-NEXT: v_mov_b32_e32 v1, s1 -; GFX10PLUS-NEXT: s_mov_b32 s0, s2 -; GFX10PLUS-NEXT: s_mov_b32 s2, s4 -; GFX10PLUS-NEXT: s_mov_b32 s3, s5 -; GFX10PLUS-NEXT: s_mov_b32 s4, s6 -; GFX10PLUS-NEXT: v_cndmask_b32_e32 v1, s0, v1, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e32 v1, s2, v1, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v0 -; GFX10PLUS-NEXT: s_mov_b32 s5, s7 -; GFX10PLUS-NEXT: s_mov_b32 s6, s8 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s2, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s4, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s3, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s5, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s4, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s6, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s5, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s7, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v0, v1, s6, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v0, v1, s8, vcc_lo ; GFX10PLUS-NEXT: ; return to shader part epilog entry: %ext = extractelement <7 x float> %vec, i32 %sel @@ -2506,26 +2420,18 @@ define amdgpu_ps double @dyn_extract_v6f64_s_v(<6 x double> inreg %vec, i32 %sel) { ; GCN-LABEL: dyn_extract_v6f64_s_v: ; GCN: ; %bb.0: ; %entry -; GCN-NEXT: s_mov_b32 s0, s2 -; GCN-NEXT: s_mov_b32 s1, s3 -; GCN-NEXT: s_mov_b32 s2, s4 -; GCN-NEXT: s_mov_b32 s3, s5 -; GCN-NEXT: s_mov_b32 s4, s6 -; GCN-NEXT: s_mov_b32 s5, s7 -; GCN-NEXT: v_mov_b32_e32 v1, s0 -; GCN-NEXT: v_mov_b32_e32 v2, s1 -; GCN-NEXT: v_mov_b32_e32 v3, s2 -; GCN-NEXT: v_mov_b32_e32 v4, s3 +; GCN-NEXT: v_mov_b32_e32 v1, s2 +; GCN-NEXT: v_mov_b32_e32 v2, s3 +; GCN-NEXT: v_mov_b32_e32 v3, s4 +; GCN-NEXT: v_mov_b32_e32 v4, s5 ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0 -; GCN-NEXT: s_mov_b32 s6, s8 -; GCN-NEXT: s_mov_b32 s7, s9 -; GCN-NEXT: v_mov_b32_e32 v5, s4 -; GCN-NEXT: v_mov_b32_e32 v6, s5 +; GCN-NEXT: v_mov_b32_e32 v5, s6 +; GCN-NEXT: v_mov_b32_e32 v6, s7 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc ; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 2, v0 -; GCN-NEXT: v_mov_b32_e32 v7, s6 -; GCN-NEXT: v_mov_b32_e32 v8, s7 +; GCN-NEXT: v_mov_b32_e32 v7, s8 +; GCN-NEXT: v_mov_b32_e32 v8, s9 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc ; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 3, v0 @@ -2547,69 +2453,45 @@ ; ; GFX10-LABEL: dyn_extract_v6f64_s_v: ; GFX10: ; %bb.0: ; %entry -; GFX10-NEXT: s_mov_b32 s0, s2 -; GFX10-NEXT: s_mov_b32 s2, s4 -; GFX10-NEXT: s_mov_b32 s15, s5 -; GFX10-NEXT: v_mov_b32_e32 v1, s2 -; GFX10-NEXT: v_mov_b32_e32 v2, s15 +; GFX10-NEXT: v_mov_b32_e32 v1, s4 +; GFX10-NEXT: v_mov_b32_e32 v2, s5 ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX10-NEXT: s_mov_b32 s1, s3 -; GFX10-NEXT: s_mov_b32 s4, s6 -; GFX10-NEXT: s_mov_b32 s5, s7 -; GFX10-NEXT: s_mov_b32 s6, s8 -; GFX10-NEXT: v_cndmask_b32_e32 v1, s0, v1, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v2, s1, v2, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v1, s2, v1, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v2, s3, v2, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v0 -; GFX10-NEXT: s_mov_b32 s7, s9 -; 
GFX10-NEXT: s_mov_b32 s8, s10 -; GFX10-NEXT: s_mov_b32 s9, s11 -; GFX10-NEXT: s_mov_b32 s10, s12 -; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s4, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s5, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 -; GFX10-NEXT: s_mov_b32 s11, s13 ; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s6, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s7, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 ; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s8, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s9, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 +; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s10, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s11, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0 -; GFX10-NEXT: v_cndmask_b32_e64 v0, v1, s10, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v1, v2, s11, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v0, v1, s12, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v1, v2, s13, vcc_lo ; GFX10-NEXT: v_readfirstlane_b32 s0, v0 ; GFX10-NEXT: v_readfirstlane_b32 s1, v1 ; GFX10-NEXT: ; return to shader part epilog ; ; GFX11-LABEL: dyn_extract_v6f64_s_v: ; GFX11: ; %bb.0: ; %entry -; GFX11-NEXT: s_mov_b32 s0, s2 -; GFX11-NEXT: s_mov_b32 s2, s4 -; GFX11-NEXT: s_mov_b32 s15, s5 -; GFX11-NEXT: v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s15 +; GFX11-NEXT: v_dual_mov_b32 v1, s4 :: v_dual_mov_b32 v2, s5 ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX11-NEXT: s_mov_b32 s1, s3 -; GFX11-NEXT: s_mov_b32 s4, s6 -; GFX11-NEXT: s_mov_b32 s5, s7 -; GFX11-NEXT: s_mov_b32 s6, s8 -; GFX11-NEXT: v_cndmask_b32_e32 v1, s0, v1, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e32 v2, s1, v2, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e32 v1, s2, v1, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e32 v2, s3, v2, vcc_lo ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v0 -; GFX11-NEXT: s_mov_b32 s7, s9 -; GFX11-NEXT: s_mov_b32 s8, s10 -; GFX11-NEXT: s_mov_b32 s9, s11 -; GFX11-NEXT: s_mov_b32 s10, s12 -; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s4, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s5, vcc_lo -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 -; GFX11-NEXT: s_mov_b32 s11, s13 ; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s6, vcc_lo ; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s7, vcc_lo -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 ; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s8, vcc_lo ; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s9, vcc_lo +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s10, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s11, vcc_lo ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0 -; GFX11-NEXT: v_cndmask_b32_e64 v0, v1, s10, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v1, v2, s11, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v0, v1, s12, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v1, v2, s13, vcc_lo ; GFX11-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11-NEXT: v_readfirstlane_b32 s1, v1 ; GFX11-NEXT: ; return to shader part epilog @@ -2758,33 +2640,23 @@ define amdgpu_ps double @dyn_extract_v7f64_s_v(<7 x double> inreg %vec, i32 %sel) { ; GCN-LABEL: dyn_extract_v7f64_s_v: ; GCN: ; %bb.0: ; %entry -; GCN-NEXT: s_mov_b32 s0, s2 -; GCN-NEXT: s_mov_b32 s1, s3 -; GCN-NEXT: s_mov_b32 s2, s4 -; GCN-NEXT: s_mov_b32 s3, s5 -; GCN-NEXT: s_mov_b32 s4, s6 -; GCN-NEXT: s_mov_b32 s5, s7 -; GCN-NEXT: v_mov_b32_e32 v1, s0 -; GCN-NEXT: v_mov_b32_e32 v2, s1 -; GCN-NEXT: v_mov_b32_e32 v3, s2 -; GCN-NEXT: v_mov_b32_e32 v4, s3 +; GCN-NEXT: v_mov_b32_e32 v1, s2 +; GCN-NEXT: v_mov_b32_e32 v2, s3 +; GCN-NEXT: 
v_mov_b32_e32 v3, s4 +; GCN-NEXT: v_mov_b32_e32 v4, s5 ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0 -; GCN-NEXT: s_mov_b32 s6, s8 -; GCN-NEXT: s_mov_b32 s7, s9 -; GCN-NEXT: v_mov_b32_e32 v5, s4 -; GCN-NEXT: v_mov_b32_e32 v6, s5 +; GCN-NEXT: v_mov_b32_e32 v5, s6 +; GCN-NEXT: v_mov_b32_e32 v6, s7 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc ; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 2, v0 -; GCN-NEXT: s_mov_b32 s8, s10 -; GCN-NEXT: s_mov_b32 s9, s11 -; GCN-NEXT: v_mov_b32_e32 v7, s6 -; GCN-NEXT: v_mov_b32_e32 v8, s7 +; GCN-NEXT: v_mov_b32_e32 v7, s8 +; GCN-NEXT: v_mov_b32_e32 v8, s9 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc ; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 3, v0 -; GCN-NEXT: v_mov_b32_e32 v9, s8 -; GCN-NEXT: v_mov_b32_e32 v10, s9 +; GCN-NEXT: v_mov_b32_e32 v9, s10 +; GCN-NEXT: v_mov_b32_e32 v10, s11 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc ; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 4, v0 @@ -2806,79 +2678,51 @@ ; ; GFX10-LABEL: dyn_extract_v7f64_s_v: ; GFX10: ; %bb.0: ; %entry -; GFX10-NEXT: s_mov_b32 s0, s2 -; GFX10-NEXT: s_mov_b32 s2, s4 -; GFX10-NEXT: s_mov_b32 s19, s5 -; GFX10-NEXT: v_mov_b32_e32 v1, s2 -; GFX10-NEXT: v_mov_b32_e32 v2, s19 +; GFX10-NEXT: v_mov_b32_e32 v1, s4 +; GFX10-NEXT: v_mov_b32_e32 v2, s5 ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX10-NEXT: s_mov_b32 s1, s3 -; GFX10-NEXT: s_mov_b32 s4, s6 -; GFX10-NEXT: s_mov_b32 s5, s7 -; GFX10-NEXT: s_mov_b32 s6, s8 -; GFX10-NEXT: v_cndmask_b32_e32 v1, s0, v1, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v2, s1, v2, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v1, s2, v1, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v2, s3, v2, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v0 -; GFX10-NEXT: s_mov_b32 s7, s9 -; GFX10-NEXT: s_mov_b32 s8, s10 -; GFX10-NEXT: s_mov_b32 s9, s11 -; GFX10-NEXT: s_mov_b32 s10, s12 -; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s4, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s5, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 -; GFX10-NEXT: s_mov_b32 s11, s13 -; GFX10-NEXT: s_mov_b32 s12, s14 -; GFX10-NEXT: s_mov_b32 s13, s15 ; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s6, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s7, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 ; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s8, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s9, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 ; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s10, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s11, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0 +; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s12, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s13, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v0 -; GFX10-NEXT: v_cndmask_b32_e64 v0, v1, s12, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v1, v2, s13, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v0, v1, s14, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v1, v2, s15, vcc_lo ; GFX10-NEXT: v_readfirstlane_b32 s0, v0 ; GFX10-NEXT: v_readfirstlane_b32 s1, v1 ; GFX10-NEXT: ; return to shader part epilog ; ; GFX11-LABEL: dyn_extract_v7f64_s_v: ; GFX11: ; %bb.0: ; %entry -; GFX11-NEXT: s_mov_b32 s0, s2 -; GFX11-NEXT: s_mov_b32 s2, s4 -; GFX11-NEXT: s_mov_b32 s19, s5 -; GFX11-NEXT: v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s19 +; GFX11-NEXT: v_dual_mov_b32 v1, s4 :: v_dual_mov_b32 v2, s5 ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX11-NEXT: s_mov_b32 
s1, s3 -; GFX11-NEXT: s_mov_b32 s4, s6 -; GFX11-NEXT: s_mov_b32 s5, s7 -; GFX11-NEXT: s_mov_b32 s6, s8 -; GFX11-NEXT: v_cndmask_b32_e32 v1, s0, v1, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e32 v2, s1, v2, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e32 v1, s2, v1, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e32 v2, s3, v2, vcc_lo ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v0 -; GFX11-NEXT: s_mov_b32 s7, s9 -; GFX11-NEXT: s_mov_b32 s8, s10 -; GFX11-NEXT: s_mov_b32 s9, s11 -; GFX11-NEXT: s_mov_b32 s10, s12 -; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s4, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s5, vcc_lo -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 -; GFX11-NEXT: s_mov_b32 s11, s13 -; GFX11-NEXT: s_mov_b32 s12, s14 -; GFX11-NEXT: s_mov_b32 s13, s15 ; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s6, vcc_lo ; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s7, vcc_lo -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 ; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s8, vcc_lo ; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s9, vcc_lo -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 ; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s10, vcc_lo ; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s11, vcc_lo +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s12, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s13, vcc_lo ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v0 -; GFX11-NEXT: v_cndmask_b32_e64 v0, v1, s12, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v1, v2, s13, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v0, v1, s14, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v1, v2, s15, vcc_lo ; GFX11-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11-NEXT: v_readfirstlane_b32 s1, v1 ; GFX11-NEXT: ; return to shader part epilog @@ -3545,45 +3389,34 @@ define amdgpu_ps float @dyn_extract_v15f32_s_v(<15 x float> inreg %vec, i32 %sel) { ; GCN-LABEL: dyn_extract_v15f32_s_v: ; GCN: ; %bb.0: ; %entry -; GCN-NEXT: s_mov_b32 s0, s2 -; GCN-NEXT: s_mov_b32 s1, s3 -; GCN-NEXT: s_mov_b32 s2, s4 -; GCN-NEXT: v_mov_b32_e32 v1, s0 -; GCN-NEXT: v_mov_b32_e32 v2, s1 +; GCN-NEXT: v_mov_b32_e32 v1, s2 +; GCN-NEXT: v_mov_b32_e32 v2, s3 ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0 -; GCN-NEXT: s_mov_b32 s3, s5 -; GCN-NEXT: v_mov_b32_e32 v3, s2 +; GCN-NEXT: v_mov_b32_e32 v3, s4 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 2, v0 -; GCN-NEXT: s_mov_b32 s4, s6 -; GCN-NEXT: v_mov_b32_e32 v4, s3 +; GCN-NEXT: v_mov_b32_e32 v4, s5 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 3, v0 -; GCN-NEXT: s_mov_b32 s5, s7 -; GCN-NEXT: v_mov_b32_e32 v5, s4 +; GCN-NEXT: v_mov_b32_e32 v5, s6 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 4, v0 -; GCN-NEXT: s_mov_b32 s6, s8 -; GCN-NEXT: v_mov_b32_e32 v6, s5 +; GCN-NEXT: v_mov_b32_e32 v6, s7 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 5, v0 -; GCN-NEXT: s_mov_b32 s7, s9 -; GCN-NEXT: v_mov_b32_e32 v7, s6 +; GCN-NEXT: v_mov_b32_e32 v7, s8 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 6, v0 -; GCN-NEXT: s_mov_b32 s8, s10 -; GCN-NEXT: v_mov_b32_e32 v8, s7 +; GCN-NEXT: v_mov_b32_e32 v8, s9 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 7, v0 -; GCN-NEXT: s_mov_b32 s9, s11 -; GCN-NEXT: v_mov_b32_e32 v9, s8 +; GCN-NEXT: v_mov_b32_e32 v9, s10 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 8, v0 -; GCN-NEXT: s_mov_b32 s10, s12 -; GCN-NEXT: v_mov_b32_e32 
v10, s9 +; GCN-NEXT: v_mov_b32_e32 v10, s11 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 9, v0 -; GCN-NEXT: v_mov_b32_e32 v11, s10 +; GCN-NEXT: v_mov_b32_e32 v11, s12 ; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v10, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 10, v0 ; GCN-NEXT: v_mov_b32_e32 v12, s13 @@ -3603,50 +3436,35 @@ ; ; GFX10PLUS-LABEL: dyn_extract_v15f32_s_v: ; GFX10PLUS: ; %bb.0: ; %entry -; GFX10PLUS-NEXT: s_mov_b32 s1, s3 +; GFX10PLUS-NEXT: v_mov_b32_e32 v1, s3 ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX10PLUS-NEXT: v_mov_b32_e32 v1, s1 -; GFX10PLUS-NEXT: s_mov_b32 s0, s2 -; GFX10PLUS-NEXT: s_mov_b32 s2, s4 -; GFX10PLUS-NEXT: s_mov_b32 s3, s5 -; GFX10PLUS-NEXT: s_mov_b32 s4, s6 -; GFX10PLUS-NEXT: v_cndmask_b32_e32 v1, s0, v1, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e32 v1, s2, v1, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v0 -; GFX10PLUS-NEXT: s_mov_b32 s5, s7 -; GFX10PLUS-NEXT: s_mov_b32 s6, s8 -; GFX10PLUS-NEXT: s_mov_b32 s7, s9 -; GFX10PLUS-NEXT: s_mov_b32 s8, s10 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s2, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s4, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 -; GFX10PLUS-NEXT: s_mov_b32 s9, s11 -; GFX10PLUS-NEXT: s_mov_b32 s10, s12 -; GFX10PLUS-NEXT: s_mov_b32 s11, s13 -; GFX10PLUS-NEXT: s_mov_b32 s12, s14 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s3, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s5, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 -; GFX10PLUS-NEXT: s_mov_b32 s13, s15 -; GFX10PLUS-NEXT: s_mov_b32 s14, s16 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s4, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s6, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s5, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s7, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s6, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s8, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s7, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s9, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 8, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s8, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s10, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 9, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s9, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s11, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 10, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s10, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s12, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 11, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s11, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s13, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 12, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s12, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s14, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 13, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s13, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s15, vcc_lo ; GFX10PLUS-NEXT: v_cmp_eq_u32_e32 vcc_lo, 14, v0 -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v0, v1, s14, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v0, v1, s16, vcc_lo ; GFX10PLUS-NEXT: ; return to shader part epilog entry: %ext = extractelement <15 x float> %vec, i32 %sel @@ -4664,9 +4482,9 @@ ; GPRIDX-LABEL: v_extract_v64i32_7: ; GPRIDX: ; %bb.0: ; GPRIDX-NEXT: s_waitcnt vmcnt(0) expcnt(0) 
lgkmcnt(0) -; GPRIDX-NEXT: global_load_dwordx4 v[4:7], v[0:1], off offset:16 +; GPRIDX-NEXT: global_load_dwordx4 v[0:3], v[0:1], off offset:16 ; GPRIDX-NEXT: s_waitcnt vmcnt(0) -; GPRIDX-NEXT: v_mov_b32_e32 v0, v7 +; GPRIDX-NEXT: v_mov_b32_e32 v0, v3 ; GPRIDX-NEXT: s_setpc_b64 s[30:31] ; ; MOVREL-LABEL: v_extract_v64i32_7: @@ -4674,27 +4492,27 @@ ; MOVREL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; MOVREL-NEXT: v_add_u32_e32 v0, vcc, 16, v0 ; MOVREL-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc -; MOVREL-NEXT: flat_load_dwordx4 v[4:7], v[0:1] +; MOVREL-NEXT: flat_load_dwordx4 v[0:3], v[0:1] ; MOVREL-NEXT: s_waitcnt vmcnt(0) -; MOVREL-NEXT: v_mov_b32_e32 v0, v7 +; MOVREL-NEXT: v_mov_b32_e32 v0, v3 ; MOVREL-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_extract_v64i32_7: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: global_load_dwordx4 v[4:7], v[0:1], off offset:16 +; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off offset:16 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_mov_b32_e32 v0, v7 +; GFX10-NEXT: v_mov_b32_e32 v0, v3 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-LABEL: v_extract_v64i32_7: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-NEXT: global_load_b128 v[4:7], v[0:1], off offset:16 +; GFX11-NEXT: global_load_b128 v[0:3], v[0:1], off offset:16 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_mov_b32_e32 v0, v7 +; GFX11-NEXT: v_mov_b32_e32 v0, v3 ; GFX11-NEXT: s_setpc_b64 s[30:31] %vec = load <64 x i32>, ptr addrspace(1) %ptr %elt = extractelement <64 x i32> %vec, i32 7 @@ -4789,9 +4607,9 @@ ; GPRIDX-LABEL: v_extract_v64i32_37: ; GPRIDX: ; %bb.0: ; GPRIDX-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GPRIDX-NEXT: global_load_dwordx4 v[4:7], v[0:1], off offset:144 +; GPRIDX-NEXT: global_load_dwordx4 v[0:3], v[0:1], off offset:144 ; GPRIDX-NEXT: s_waitcnt vmcnt(0) -; GPRIDX-NEXT: v_mov_b32_e32 v0, v5 +; GPRIDX-NEXT: v_mov_b32_e32 v0, v1 ; GPRIDX-NEXT: s_setpc_b64 s[30:31] ; ; MOVREL-LABEL: v_extract_v64i32_37: @@ -4799,27 +4617,27 @@ ; MOVREL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; MOVREL-NEXT: v_add_u32_e32 v0, vcc, 0x90, v0 ; MOVREL-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc -; MOVREL-NEXT: flat_load_dwordx4 v[4:7], v[0:1] +; MOVREL-NEXT: flat_load_dwordx4 v[0:3], v[0:1] ; MOVREL-NEXT: s_waitcnt vmcnt(0) -; MOVREL-NEXT: v_mov_b32_e32 v0, v5 +; MOVREL-NEXT: v_mov_b32_e32 v0, v1 ; MOVREL-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_extract_v64i32_37: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: global_load_dwordx4 v[4:7], v[0:1], off offset:144 +; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off offset:144 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_mov_b32_e32 v0, v5 +; GFX10-NEXT: v_mov_b32_e32 v0, v1 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-LABEL: v_extract_v64i32_37: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-NEXT: global_load_b128 v[4:7], v[0:1], off offset:144 +; GFX11-NEXT: global_load_b128 v[0:3], v[0:1], off offset:144 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_mov_b32_e32 v0, v5 +; GFX11-NEXT: v_mov_b32_e32 v0, v1 ; GFX11-NEXT: s_setpc_b64 s[30:31] %vec = load <64 x i32>, ptr addrspace(1) %ptr %elt = extractelement <64 x i32> %vec, i32 37 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i16.ll 
b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i16.ll --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i16.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i16.ll @@ -3238,6 +3238,10 @@ ; GFX9-NEXT: v_cmp_eq_u32_e64 s[6:7], s12, 5 ; GFX9-NEXT: v_cmp_eq_u32_e64 s[8:9], s12, 6 ; GFX9-NEXT: v_cmp_eq_u32_e64 s[10:11], s12, 7 +; GFX9-NEXT: v_mov_b32_e32 v10, 0 +; GFX9-NEXT: v_mov_b32_e32 v11, 0 +; GFX9-NEXT: v_mov_b32_e32 v12, 16 +; GFX9-NEXT: v_mov_b32_e32 v13, 0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc ; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v4, s[0:1] @@ -3247,22 +3251,18 @@ ; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v7, s[6:7] ; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v8, s[8:9] ; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v9, s[10:11] -; GFX9-NEXT: v_and_or_b32 v10, v1, s13, v0 +; GFX9-NEXT: v_and_or_b32 v14, v1, s13, v0 ; GFX9-NEXT: v_cmp_eq_u32_e64 s[12:13], s12, 0 -; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, v10, s[12:13] -; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v10, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v2, v4, v10, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v3, v5, v10, s[2:3] -; GFX9-NEXT: v_cndmask_b32_e64 v4, v6, v10, s[4:5] -; GFX9-NEXT: v_cndmask_b32_e64 v5, v7, v10, s[6:7] -; GFX9-NEXT: v_cndmask_b32_e64 v6, v8, v10, s[8:9] -; GFX9-NEXT: v_cndmask_b32_e64 v7, v9, v10, s[10:11] -; GFX9-NEXT: v_mov_b32_e32 v8, 0 -; GFX9-NEXT: v_mov_b32_e32 v9, 0 -; GFX9-NEXT: v_mov_b32_e32 v10, 16 -; GFX9-NEXT: v_mov_b32_e32 v11, 0 -; GFX9-NEXT: global_store_dwordx4 v[8:9], v[0:3], off -; GFX9-NEXT: global_store_dwordx4 v[10:11], v[4:7], off +; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, v14, s[12:13] +; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v14, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v2, v4, v14, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v3, v5, v14, s[2:3] +; GFX9-NEXT: v_cndmask_b32_e64 v4, v6, v14, s[4:5] +; GFX9-NEXT: v_cndmask_b32_e64 v5, v7, v14, s[6:7] +; GFX9-NEXT: v_cndmask_b32_e64 v6, v8, v14, s[8:9] +; GFX9-NEXT: v_cndmask_b32_e64 v7, v9, v14, s[10:11] +; GFX9-NEXT: global_store_dwordx4 v[10:11], v[0:3], off +; GFX9-NEXT: global_store_dwordx4 v[12:13], v[4:7], off ; GFX9-NEXT: s_endpgm ; ; GFX8-LABEL: insertelement_v_v16i16_s_s: @@ -3376,52 +3376,52 @@ ; GFX9-LABEL: insertelement_s_v16i16_v_s: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_load_dwordx8 s[8:15], s[2:3], 0x0 -; GFX9-NEXT: s_lshr_b32 s2, s4, 1 -; GFX9-NEXT: s_cmp_eq_u32 s2, 1 +; GFX9-NEXT: s_lshr_b32 s0, s4, 1 +; GFX9-NEXT: s_cmp_eq_u32 s0, 1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s2, 0 +; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s0, 0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_cselect_b32 s0, s9, s8 -; GFX9-NEXT: s_cmp_eq_u32 s2, 2 -; GFX9-NEXT: s_cselect_b32 s0, s10, s0 -; GFX9-NEXT: s_cmp_eq_u32 s2, 3 -; GFX9-NEXT: s_cselect_b32 s0, s11, s0 -; GFX9-NEXT: s_cmp_eq_u32 s2, 4 -; GFX9-NEXT: s_cselect_b32 s0, s12, s0 -; GFX9-NEXT: s_cmp_eq_u32 s2, 5 -; GFX9-NEXT: s_cselect_b32 s0, s13, s0 -; GFX9-NEXT: s_cmp_eq_u32 s2, 6 -; GFX9-NEXT: s_cselect_b32 s0, s14, s0 -; GFX9-NEXT: s_cmp_eq_u32 s2, 7 -; GFX9-NEXT: s_cselect_b32 s0, s15, s0 -; GFX9-NEXT: s_and_b32 s1, s4, 1 -; GFX9-NEXT: s_lshl_b32 s1, s1, 4 -; GFX9-NEXT: s_lshl_b32 s3, 0xffff, s1 -; GFX9-NEXT: s_andn2_b32 s0, s0, s3 -; GFX9-NEXT: v_mov_b32_e32 v1, s0 -; GFX9-NEXT: v_lshl_or_b32 v8, v0, s1, v1 +; GFX9-NEXT: s_cselect_b32 s1, s9, s8 +; GFX9-NEXT: s_cmp_eq_u32 s0, 2 +; GFX9-NEXT: s_cselect_b32 s1, s10, s1 +; GFX9-NEXT: s_cmp_eq_u32 s0, 3 +; GFX9-NEXT: s_cselect_b32 s1, s11, s1 +; GFX9-NEXT: s_cmp_eq_u32 s0, 4 +; GFX9-NEXT: 
s_cselect_b32 s1, s12, s1 +; GFX9-NEXT: s_cmp_eq_u32 s0, 5 +; GFX9-NEXT: s_cselect_b32 s1, s13, s1 +; GFX9-NEXT: s_cmp_eq_u32 s0, 6 +; GFX9-NEXT: s_cselect_b32 s1, s14, s1 +; GFX9-NEXT: s_cmp_eq_u32 s0, 7 +; GFX9-NEXT: s_cselect_b32 s1, s15, s1 +; GFX9-NEXT: s_and_b32 s2, s4, 1 +; GFX9-NEXT: s_lshl_b32 s2, s2, 4 +; GFX9-NEXT: s_lshl_b32 s3, 0xffff, s2 +; GFX9-NEXT: s_andn2_b32 s1, s1, s3 +; GFX9-NEXT: v_mov_b32_e32 v1, s1 +; GFX9-NEXT: v_lshl_or_b32 v8, v0, s2, v1 ; GFX9-NEXT: v_mov_b32_e32 v0, s8 ; GFX9-NEXT: v_mov_b32_e32 v1, s9 ; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc -; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s2, 1 +; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s0, 1 ; GFX9-NEXT: v_mov_b32_e32 v2, s10 ; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc -; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s2, 2 +; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s0, 2 ; GFX9-NEXT: v_mov_b32_e32 v3, s11 ; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc -; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s2, 3 -; GFX9-NEXT: v_mov_b32_e32 v5, s13 +; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s0, 3 +; GFX9-NEXT: v_mov_b32_e32 v4, s12 ; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc -; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s2, 5 +; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s0, 4 +; GFX9-NEXT: v_mov_b32_e32 v5, s13 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc +; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s0, 5 ; GFX9-NEXT: v_mov_b32_e32 v6, s14 ; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc -; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s2, 6 -; GFX9-NEXT: v_mov_b32_e32 v4, s12 +; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s0, 6 ; GFX9-NEXT: v_mov_b32_e32 v7, s15 -; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s2, 4 ; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc -; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s2, 7 -; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, v8, s[0:1] +; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s0, 7 ; GFX9-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc ; GFX9-NEXT: v_mov_b32_e32 v8, 0 ; GFX9-NEXT: v_mov_b32_e32 v9, 0 @@ -4161,31 +4161,31 @@ ; GFX9-NEXT: v_cmp_eq_u32_e64 s[8:9], 6, v0 ; GFX9-NEXT: v_cmp_eq_u32_e64 s[10:11], 7, v0 ; GFX9-NEXT: v_xor_b32_e32 v1, -1, v1 +; GFX9-NEXT: v_mov_b32_e32 v11, 0 ; GFX9-NEXT: v_cmp_eq_u32_e64 s[12:13], 0, v0 +; GFX9-NEXT: v_mov_b32_e32 v12, 0 +; GFX9-NEXT: v_mov_b32_e32 v13, 16 +; GFX9-NEXT: v_mov_b32_e32 v14, 0 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_cndmask_b32_e32 v11, v3, v4, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v11, v11, v5, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v11, v11, v6, s[2:3] +; GFX9-NEXT: v_cndmask_b32_e32 v15, v3, v4, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v15, v15, v5, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v15, v15, v6, s[2:3] ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_cndmask_b32_e64 v11, v11, v7, s[4:5] -; GFX9-NEXT: v_cndmask_b32_e64 v11, v11, v8, s[6:7] -; GFX9-NEXT: v_cndmask_b32_e64 v11, v11, v9, s[8:9] -; GFX9-NEXT: v_cndmask_b32_e64 v11, v11, v10, s[10:11] -; GFX9-NEXT: v_and_or_b32 v11, v11, v1, v2 -; GFX9-NEXT: v_cndmask_b32_e64 v0, v3, v11, s[12:13] -; GFX9-NEXT: v_cndmask_b32_e64 v2, v5, v11, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v3, v6, v11, s[2:3] -; GFX9-NEXT: v_cndmask_b32_e64 v5, v8, v11, s[6:7] -; GFX9-NEXT: v_cndmask_b32_e64 v6, v9, v11, s[8:9] -; GFX9-NEXT: v_mov_b32_e32 v8, 0 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v11, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v4, v7, v11, s[4:5] -; GFX9-NEXT: v_cndmask_b32_e64 v7, v10, v11, s[10:11] -; GFX9-NEXT: v_mov_b32_e32 v9, 0 -; GFX9-NEXT: v_mov_b32_e32 v10, 16 -; GFX9-NEXT: v_mov_b32_e32 v11, 0 -; GFX9-NEXT: global_store_dwordx4 v[8:9], v[0:3], off -; GFX9-NEXT: global_store_dwordx4 v[10:11], v[4:7], off +; 
GFX9-NEXT: v_cndmask_b32_e64 v15, v15, v7, s[4:5] +; GFX9-NEXT: v_cndmask_b32_e64 v15, v15, v8, s[6:7] +; GFX9-NEXT: v_cndmask_b32_e64 v15, v15, v9, s[8:9] +; GFX9-NEXT: v_cndmask_b32_e64 v15, v15, v10, s[10:11] +; GFX9-NEXT: v_and_or_b32 v15, v15, v1, v2 +; GFX9-NEXT: v_cndmask_b32_e64 v0, v3, v15, s[12:13] +; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v15, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v2, v5, v15, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v3, v6, v15, s[2:3] +; GFX9-NEXT: v_cndmask_b32_e64 v4, v7, v15, s[4:5] +; GFX9-NEXT: v_cndmask_b32_e64 v5, v8, v15, s[6:7] +; GFX9-NEXT: v_cndmask_b32_e64 v6, v9, v15, s[8:9] +; GFX9-NEXT: v_cndmask_b32_e64 v7, v10, v15, s[10:11] +; GFX9-NEXT: global_store_dwordx4 v[11:12], v[0:3], off +; GFX9-NEXT: global_store_dwordx4 v[13:14], v[4:7], off ; GFX9-NEXT: s_endpgm ; ; GFX8-LABEL: insertelement_v_v16i16_s_v: @@ -4209,32 +4209,32 @@ ; GFX8-NEXT: v_cmp_eq_u32_e64 s[8:9], 6, v0 ; GFX8-NEXT: v_cmp_eq_u32_e64 s[10:11], 7, v0 ; GFX8-NEXT: v_xor_b32_e32 v1, -1, v1 +; GFX8-NEXT: v_mov_b32_e32 v11, 0 ; GFX8-NEXT: v_cmp_eq_u32_e64 s[12:13], 0, v0 +; GFX8-NEXT: v_mov_b32_e32 v12, 0 +; GFX8-NEXT: v_mov_b32_e32 v13, 16 +; GFX8-NEXT: v_mov_b32_e32 v14, 0 ; GFX8-NEXT: s_waitcnt vmcnt(1) -; GFX8-NEXT: v_cndmask_b32_e32 v11, v3, v4, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v11, v11, v5, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v11, v11, v6, s[2:3] +; GFX8-NEXT: v_cndmask_b32_e32 v15, v3, v4, vcc +; GFX8-NEXT: v_cndmask_b32_e64 v15, v15, v5, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v15, v15, v6, s[2:3] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_cndmask_b32_e64 v11, v11, v7, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e64 v11, v11, v8, s[6:7] -; GFX8-NEXT: v_cndmask_b32_e64 v11, v11, v9, s[8:9] -; GFX8-NEXT: v_cndmask_b32_e64 v11, v11, v10, s[10:11] -; GFX8-NEXT: v_and_b32_e32 v1, v11, v1 -; GFX8-NEXT: v_or_b32_e32 v11, v1, v2 -; GFX8-NEXT: v_cndmask_b32_e64 v0, v3, v11, s[12:13] -; GFX8-NEXT: v_cndmask_b32_e64 v2, v5, v11, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v11, s[2:3] -; GFX8-NEXT: v_cndmask_b32_e64 v5, v8, v11, s[6:7] -; GFX8-NEXT: v_cndmask_b32_e64 v6, v9, v11, s[8:9] -; GFX8-NEXT: v_mov_b32_e32 v8, 0 -; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v11, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v4, v7, v11, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e64 v7, v10, v11, s[10:11] -; GFX8-NEXT: v_mov_b32_e32 v9, 0 -; GFX8-NEXT: v_mov_b32_e32 v10, 16 -; GFX8-NEXT: v_mov_b32_e32 v11, 0 -; GFX8-NEXT: flat_store_dwordx4 v[8:9], v[0:3] -; GFX8-NEXT: flat_store_dwordx4 v[10:11], v[4:7] +; GFX8-NEXT: v_cndmask_b32_e64 v15, v15, v7, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v15, v15, v8, s[6:7] +; GFX8-NEXT: v_cndmask_b32_e64 v15, v15, v9, s[8:9] +; GFX8-NEXT: v_cndmask_b32_e64 v15, v15, v10, s[10:11] +; GFX8-NEXT: v_and_b32_e32 v1, v15, v1 +; GFX8-NEXT: v_or_b32_e32 v15, v1, v2 +; GFX8-NEXT: v_cndmask_b32_e64 v0, v3, v15, s[12:13] +; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v15, vcc +; GFX8-NEXT: v_cndmask_b32_e64 v2, v5, v15, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v15, s[2:3] +; GFX8-NEXT: v_cndmask_b32_e64 v4, v7, v15, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v5, v8, v15, s[6:7] +; GFX8-NEXT: v_cndmask_b32_e64 v6, v9, v15, s[8:9] +; GFX8-NEXT: v_cndmask_b32_e64 v7, v10, v15, s[10:11] +; GFX8-NEXT: flat_store_dwordx4 v[11:12], v[0:3] +; GFX8-NEXT: flat_store_dwordx4 v[13:14], v[4:7] ; GFX8-NEXT: s_endpgm ; ; GFX7-LABEL: insertelement_v_v16i16_s_v: @@ -4405,6 +4405,10 @@ ; GFX9-NEXT: v_cmp_eq_u32_e64 s[6:7], s12, 5 ; GFX9-NEXT: v_cmp_eq_u32_e64 s[8:9], s12, 6 ; GFX9-NEXT: v_cmp_eq_u32_e64 s[10:11], s12, 7 
+; GFX9-NEXT: v_mov_b32_e32 v11, 0 +; GFX9-NEXT: v_mov_b32_e32 v12, 0 +; GFX9-NEXT: v_mov_b32_e32 v13, 16 +; GFX9-NEXT: v_mov_b32_e32 v14, 0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc ; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v5, s[0:1] @@ -4414,22 +4418,18 @@ ; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v8, s[6:7] ; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v9, s[8:9] ; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v10, s[10:11] -; GFX9-NEXT: v_and_or_b32 v11, v1, s13, v0 +; GFX9-NEXT: v_and_or_b32 v15, v1, s13, v0 ; GFX9-NEXT: v_cmp_eq_u32_e64 s[12:13], s12, 0 -; GFX9-NEXT: v_cndmask_b32_e64 v0, v3, v11, s[12:13] -; GFX9-NEXT: v_cndmask_b32_e64 v2, v5, v11, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v3, v6, v11, s[2:3] -; GFX9-NEXT: v_cndmask_b32_e64 v5, v8, v11, s[6:7] -; GFX9-NEXT: v_cndmask_b32_e64 v6, v9, v11, s[8:9] -; GFX9-NEXT: v_mov_b32_e32 v8, 0 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v11, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v4, v7, v11, s[4:5] -; GFX9-NEXT: v_cndmask_b32_e64 v7, v10, v11, s[10:11] -; GFX9-NEXT: v_mov_b32_e32 v9, 0 -; GFX9-NEXT: v_mov_b32_e32 v10, 16 -; GFX9-NEXT: v_mov_b32_e32 v11, 0 -; GFX9-NEXT: global_store_dwordx4 v[8:9], v[0:3], off -; GFX9-NEXT: global_store_dwordx4 v[10:11], v[4:7], off +; GFX9-NEXT: v_cndmask_b32_e64 v0, v3, v15, s[12:13] +; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v15, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v2, v5, v15, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v3, v6, v15, s[2:3] +; GFX9-NEXT: v_cndmask_b32_e64 v4, v7, v15, s[4:5] +; GFX9-NEXT: v_cndmask_b32_e64 v5, v8, v15, s[6:7] +; GFX9-NEXT: v_cndmask_b32_e64 v6, v9, v15, s[8:9] +; GFX9-NEXT: v_cndmask_b32_e64 v7, v10, v15, s[10:11] +; GFX9-NEXT: global_store_dwordx4 v[11:12], v[0:3], off +; GFX9-NEXT: global_store_dwordx4 v[13:14], v[4:7], off ; GFX9-NEXT: s_endpgm ; ; GFX8-LABEL: insertelement_v_v16i16_v_s: @@ -4557,7 +4557,11 @@ ; GFX9-NEXT: v_cmp_eq_u32_e64 s[8:9], 6, v0 ; GFX9-NEXT: v_cmp_eq_u32_e64 s[10:11], 7, v0 ; GFX9-NEXT: v_xor_b32_e32 v1, -1, v1 +; GFX9-NEXT: v_mov_b32_e32 v12, 0 ; GFX9-NEXT: v_cmp_eq_u32_e64 s[12:13], 0, v0 +; GFX9-NEXT: v_mov_b32_e32 v13, 0 +; GFX9-NEXT: v_mov_b32_e32 v14, 16 +; GFX9-NEXT: v_mov_b32_e32 v15, 0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc ; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v6, s[0:1] @@ -4567,21 +4571,17 @@ ; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v9, s[6:7] ; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v10, s[8:9] ; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v11, s[10:11] -; GFX9-NEXT: v_and_or_b32 v12, v3, v1, v2 -; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, v12, s[12:13] -; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v12, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v4, v8, v12, s[4:5] -; GFX9-NEXT: v_cndmask_b32_e64 v5, v9, v12, s[6:7] -; GFX9-NEXT: v_mov_b32_e32 v8, 0 -; GFX9-NEXT: v_cndmask_b32_e64 v2, v6, v12, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v3, v7, v12, s[2:3] -; GFX9-NEXT: v_cndmask_b32_e64 v6, v10, v12, s[8:9] -; GFX9-NEXT: v_cndmask_b32_e64 v7, v11, v12, s[10:11] -; GFX9-NEXT: v_mov_b32_e32 v9, 0 -; GFX9-NEXT: v_mov_b32_e32 v10, 16 -; GFX9-NEXT: v_mov_b32_e32 v11, 0 -; GFX9-NEXT: global_store_dwordx4 v[8:9], v[0:3], off -; GFX9-NEXT: global_store_dwordx4 v[10:11], v[4:7], off +; GFX9-NEXT: v_and_or_b32 v16, v3, v1, v2 +; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, v16, s[12:13] +; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v16, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v2, v6, v16, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v3, v7, v16, s[2:3] +; GFX9-NEXT: v_cndmask_b32_e64 v4, v8, v16, s[4:5] +; GFX9-NEXT: v_cndmask_b32_e64 v5, v9, v16, 
s[6:7] +; GFX9-NEXT: v_cndmask_b32_e64 v6, v10, v16, s[8:9] +; GFX9-NEXT: v_cndmask_b32_e64 v7, v11, v16, s[10:11] +; GFX9-NEXT: global_store_dwordx4 v[12:13], v[0:3], off +; GFX9-NEXT: global_store_dwordx4 v[14:15], v[4:7], off ; GFX9-NEXT: s_endpgm ; ; GFX8-LABEL: insertelement_v_v16i16_v_v: @@ -4604,7 +4604,11 @@ ; GFX8-NEXT: v_cmp_eq_u32_e64 s[8:9], 6, v0 ; GFX8-NEXT: v_cmp_eq_u32_e64 s[10:11], 7, v0 ; GFX8-NEXT: v_xor_b32_e32 v1, -1, v1 +; GFX8-NEXT: v_mov_b32_e32 v12, 0 ; GFX8-NEXT: v_cmp_eq_u32_e64 s[12:13], 0, v0 +; GFX8-NEXT: v_mov_b32_e32 v13, 0 +; GFX8-NEXT: v_mov_b32_e32 v14, 16 +; GFX8-NEXT: v_mov_b32_e32 v15, 0 ; GFX8-NEXT: s_waitcnt vmcnt(1) ; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc ; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, v6, s[0:1] @@ -4615,21 +4619,17 @@ ; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, v10, s[8:9] ; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, v11, s[10:11] ; GFX8-NEXT: v_and_b32_e32 v1, v3, v1 -; GFX8-NEXT: v_or_b32_e32 v12, v1, v2 -; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, v12, s[12:13] -; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v12, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v4, v8, v12, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e64 v5, v9, v12, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v8, 0 -; GFX8-NEXT: v_cndmask_b32_e64 v2, v6, v12, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v3, v7, v12, s[2:3] -; GFX8-NEXT: v_cndmask_b32_e64 v6, v10, v12, s[8:9] -; GFX8-NEXT: v_cndmask_b32_e64 v7, v11, v12, s[10:11] -; GFX8-NEXT: v_mov_b32_e32 v9, 0 -; GFX8-NEXT: v_mov_b32_e32 v10, 16 -; GFX8-NEXT: v_mov_b32_e32 v11, 0 -; GFX8-NEXT: flat_store_dwordx4 v[8:9], v[0:3] -; GFX8-NEXT: flat_store_dwordx4 v[10:11], v[4:7] +; GFX8-NEXT: v_or_b32_e32 v16, v1, v2 +; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, v16, s[12:13] +; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v16, vcc +; GFX8-NEXT: v_cndmask_b32_e64 v2, v6, v16, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v3, v7, v16, s[2:3] +; GFX8-NEXT: v_cndmask_b32_e64 v4, v8, v16, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v5, v9, v16, s[6:7] +; GFX8-NEXT: v_cndmask_b32_e64 v6, v10, v16, s[8:9] +; GFX8-NEXT: v_cndmask_b32_e64 v7, v11, v16, s[10:11] +; GFX8-NEXT: flat_store_dwordx4 v[12:13], v[0:3] +; GFX8-NEXT: flat_store_dwordx4 v[14:15], v[4:7] ; GFX8-NEXT: s_endpgm ; ; GFX7-LABEL: insertelement_v_v16i16_v_v: diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.large.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.large.ll --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.large.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.large.ll @@ -9,43 +9,34 @@ ; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 ; GCN-NEXT: v_lshlrev_b32_e32 v64, 8, v0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: global_load_dwordx4 v[4:7], v64, s[0:1] offset:144 ; GCN-NEXT: global_load_dwordx4 v[0:3], v64, s[0:1] -; GCN-NEXT: global_load_dwordx4 v[8:11], v64, s[0:1] offset:16 -; GCN-NEXT: global_load_dwordx4 v[12:15], v64, s[0:1] offset:32 -; GCN-NEXT: global_load_dwordx4 v[16:19], v64, s[0:1] offset:48 -; GCN-NEXT: global_load_dwordx4 v[20:23], v64, s[0:1] offset:64 -; GCN-NEXT: global_load_dwordx4 v[24:27], v64, s[0:1] offset:80 -; GCN-NEXT: global_load_dwordx4 v[28:31], v64, s[0:1] offset:96 -; GCN-NEXT: global_load_dwordx4 v[32:35], v64, s[0:1] offset:112 -; GCN-NEXT: global_load_dwordx4 v[36:39], v64, s[0:1] offset:128 +; GCN-NEXT: global_load_dwordx4 v[4:7], v64, s[0:1] offset:16 +; GCN-NEXT: global_load_dwordx4 v[8:11], v64, s[0:1] offset:32 +; GCN-NEXT: global_load_dwordx4 v[12:15], v64, s[0:1] offset:48 +; GCN-NEXT: global_load_dwordx4 v[16:19], v64, s[0:1] offset:64 +; 
GCN-NEXT: global_load_dwordx4 v[20:23], v64, s[0:1] offset:80 +; GCN-NEXT: global_load_dwordx4 v[24:27], v64, s[0:1] offset:96 +; GCN-NEXT: global_load_dwordx4 v[28:31], v64, s[0:1] offset:112 +; GCN-NEXT: global_load_dwordx4 v[32:35], v64, s[0:1] offset:128 +; GCN-NEXT: global_load_dwordx4 v[36:39], v64, s[0:1] offset:144 ; GCN-NEXT: global_load_dwordx4 v[40:43], v64, s[0:1] offset:160 ; GCN-NEXT: global_load_dwordx4 v[44:47], v64, s[0:1] offset:176 ; GCN-NEXT: global_load_dwordx4 v[48:51], v64, s[0:1] offset:192 ; GCN-NEXT: global_load_dwordx4 v[52:55], v64, s[0:1] offset:208 ; GCN-NEXT: global_load_dwordx4 v[56:59], v64, s[0:1] offset:224 ; GCN-NEXT: global_load_dwordx4 v[60:63], v64, s[0:1] offset:240 -; GCN-NEXT: s_waitcnt vmcnt(15) -; GCN-NEXT: v_mov_b32_e32 v5, 0x3e7 -; GCN-NEXT: global_store_dwordx4 v64, v[4:7], s[2:3] offset:144 -; GCN-NEXT: s_waitcnt vmcnt(15) +; GCN-NEXT: s_waitcnt vmcnt(6) +; GCN-NEXT: v_mov_b32_e32 v37, 0x3e7 ; GCN-NEXT: global_store_dwordx4 v64, v[0:3], s[2:3] -; GCN-NEXT: s_waitcnt vmcnt(15) -; GCN-NEXT: global_store_dwordx4 v64, v[8:11], s[2:3] offset:16 -; GCN-NEXT: s_waitcnt vmcnt(15) -; GCN-NEXT: global_store_dwordx4 v64, v[12:15], s[2:3] offset:32 -; GCN-NEXT: s_waitcnt vmcnt(15) -; GCN-NEXT: global_store_dwordx4 v64, v[16:19], s[2:3] offset:48 -; GCN-NEXT: s_waitcnt vmcnt(15) -; GCN-NEXT: global_store_dwordx4 v64, v[20:23], s[2:3] offset:64 -; GCN-NEXT: s_waitcnt vmcnt(15) -; GCN-NEXT: global_store_dwordx4 v64, v[24:27], s[2:3] offset:80 -; GCN-NEXT: s_waitcnt vmcnt(15) -; GCN-NEXT: global_store_dwordx4 v64, v[28:31], s[2:3] offset:96 -; GCN-NEXT: s_waitcnt vmcnt(15) -; GCN-NEXT: global_store_dwordx4 v64, v[32:35], s[2:3] offset:112 -; GCN-NEXT: s_waitcnt vmcnt(15) -; GCN-NEXT: global_store_dwordx4 v64, v[36:39], s[2:3] offset:128 +; GCN-NEXT: global_store_dwordx4 v64, v[4:7], s[2:3] offset:16 +; GCN-NEXT: global_store_dwordx4 v64, v[8:11], s[2:3] offset:32 +; GCN-NEXT: global_store_dwordx4 v64, v[12:15], s[2:3] offset:48 +; GCN-NEXT: global_store_dwordx4 v64, v[16:19], s[2:3] offset:64 +; GCN-NEXT: global_store_dwordx4 v64, v[20:23], s[2:3] offset:80 +; GCN-NEXT: global_store_dwordx4 v64, v[24:27], s[2:3] offset:96 +; GCN-NEXT: global_store_dwordx4 v64, v[28:31], s[2:3] offset:112 +; GCN-NEXT: global_store_dwordx4 v64, v[32:35], s[2:3] offset:128 +; GCN-NEXT: global_store_dwordx4 v64, v[36:39], s[2:3] offset:144 ; GCN-NEXT: s_waitcnt vmcnt(15) ; GCN-NEXT: global_store_dwordx4 v64, v[40:43], s[2:3] offset:160 ; GCN-NEXT: s_waitcnt vmcnt(15) @@ -67,39 +58,45 @@ ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_clause 0xf ; GFX10-NEXT: global_load_dwordx4 v[0:3], v64, s[0:1] -; GFX10-NEXT: global_load_dwordx4 v[8:11], v64, s[0:1] offset:16 -; GFX10-NEXT: global_load_dwordx4 v[12:15], v64, s[0:1] offset:32 -; GFX10-NEXT: global_load_dwordx4 v[16:19], v64, s[0:1] offset:48 -; GFX10-NEXT: global_load_dwordx4 v[20:23], v64, s[0:1] offset:64 -; GFX10-NEXT: global_load_dwordx4 v[24:27], v64, s[0:1] offset:80 -; GFX10-NEXT: global_load_dwordx4 v[28:31], v64, s[0:1] offset:96 -; GFX10-NEXT: global_load_dwordx4 v[32:35], v64, s[0:1] offset:112 -; GFX10-NEXT: global_load_dwordx4 v[36:39], v64, s[0:1] offset:160 -; GFX10-NEXT: global_load_dwordx4 v[40:43], v64, s[0:1] offset:176 -; GFX10-NEXT: global_load_dwordx4 v[44:47], v64, s[0:1] offset:192 -; GFX10-NEXT: global_load_dwordx4 v[48:51], v64, s[0:1] offset:208 -; GFX10-NEXT: global_load_dwordx4 v[52:55], v64, s[0:1] offset:224 -; GFX10-NEXT: global_load_dwordx4 v[56:59], v64, s[0:1] offset:240 -; GFX10-NEXT: 
global_load_dwordx4 v[60:63], v64, s[0:1] offset:128 -; GFX10-NEXT: global_load_dwordx4 v[4:7], v64, s[0:1] offset:144 -; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_mov_b32_e32 v5, 0x3e7 +; GFX10-NEXT: global_load_dwordx4 v[4:7], v64, s[0:1] offset:16 +; GFX10-NEXT: global_load_dwordx4 v[8:11], v64, s[0:1] offset:32 +; GFX10-NEXT: global_load_dwordx4 v[12:15], v64, s[0:1] offset:48 +; GFX10-NEXT: global_load_dwordx4 v[16:19], v64, s[0:1] offset:64 +; GFX10-NEXT: global_load_dwordx4 v[20:23], v64, s[0:1] offset:80 +; GFX10-NEXT: global_load_dwordx4 v[24:27], v64, s[0:1] offset:96 +; GFX10-NEXT: global_load_dwordx4 v[28:31], v64, s[0:1] offset:112 +; GFX10-NEXT: global_load_dwordx4 v[32:35], v64, s[0:1] offset:128 +; GFX10-NEXT: global_load_dwordx4 v[36:39], v64, s[0:1] offset:144 +; GFX10-NEXT: global_load_dwordx4 v[40:43], v64, s[0:1] offset:160 +; GFX10-NEXT: global_load_dwordx4 v[44:47], v64, s[0:1] offset:176 +; GFX10-NEXT: global_load_dwordx4 v[48:51], v64, s[0:1] offset:192 +; GFX10-NEXT: global_load_dwordx4 v[52:55], v64, s[0:1] offset:208 +; GFX10-NEXT: global_load_dwordx4 v[56:59], v64, s[0:1] offset:224 +; GFX10-NEXT: global_load_dwordx4 v[60:63], v64, s[0:1] offset:240 +; GFX10-NEXT: s_waitcnt vmcnt(6) +; GFX10-NEXT: v_mov_b32_e32 v37, 0x3e7 ; GFX10-NEXT: global_store_dwordx4 v64, v[0:3], s[2:3] -; GFX10-NEXT: global_store_dwordx4 v64, v[8:11], s[2:3] offset:16 -; GFX10-NEXT: global_store_dwordx4 v64, v[12:15], s[2:3] offset:32 -; GFX10-NEXT: global_store_dwordx4 v64, v[16:19], s[2:3] offset:48 -; GFX10-NEXT: global_store_dwordx4 v64, v[20:23], s[2:3] offset:64 -; GFX10-NEXT: global_store_dwordx4 v64, v[24:27], s[2:3] offset:80 -; GFX10-NEXT: global_store_dwordx4 v64, v[28:31], s[2:3] offset:96 -; GFX10-NEXT: global_store_dwordx4 v64, v[32:35], s[2:3] offset:112 -; GFX10-NEXT: global_store_dwordx4 v64, v[60:63], s[2:3] offset:128 -; GFX10-NEXT: global_store_dwordx4 v64, v[4:7], s[2:3] offset:144 -; GFX10-NEXT: global_store_dwordx4 v64, v[36:39], s[2:3] offset:160 -; GFX10-NEXT: global_store_dwordx4 v64, v[40:43], s[2:3] offset:176 -; GFX10-NEXT: global_store_dwordx4 v64, v[44:47], s[2:3] offset:192 -; GFX10-NEXT: global_store_dwordx4 v64, v[48:51], s[2:3] offset:208 -; GFX10-NEXT: global_store_dwordx4 v64, v[52:55], s[2:3] offset:224 -; GFX10-NEXT: global_store_dwordx4 v64, v[56:59], s[2:3] offset:240 +; GFX10-NEXT: global_store_dwordx4 v64, v[4:7], s[2:3] offset:16 +; GFX10-NEXT: global_store_dwordx4 v64, v[8:11], s[2:3] offset:32 +; GFX10-NEXT: global_store_dwordx4 v64, v[12:15], s[2:3] offset:48 +; GFX10-NEXT: global_store_dwordx4 v64, v[16:19], s[2:3] offset:64 +; GFX10-NEXT: global_store_dwordx4 v64, v[20:23], s[2:3] offset:80 +; GFX10-NEXT: global_store_dwordx4 v64, v[24:27], s[2:3] offset:96 +; GFX10-NEXT: global_store_dwordx4 v64, v[28:31], s[2:3] offset:112 +; GFX10-NEXT: global_store_dwordx4 v64, v[32:35], s[2:3] offset:128 +; GFX10-NEXT: global_store_dwordx4 v64, v[36:39], s[2:3] offset:144 +; GFX10-NEXT: s_waitcnt vmcnt(5) +; GFX10-NEXT: global_store_dwordx4 v64, v[40:43], s[2:3] offset:160 +; GFX10-NEXT: s_waitcnt vmcnt(4) +; GFX10-NEXT: global_store_dwordx4 v64, v[44:47], s[2:3] offset:176 +; GFX10-NEXT: s_waitcnt vmcnt(3) +; GFX10-NEXT: global_store_dwordx4 v64, v[48:51], s[2:3] offset:192 +; GFX10-NEXT: s_waitcnt vmcnt(2) +; GFX10-NEXT: global_store_dwordx4 v64, v[52:55], s[2:3] offset:208 +; GFX10-NEXT: s_waitcnt vmcnt(1) +; GFX10-NEXT: global_store_dwordx4 v64, v[56:59], s[2:3] offset:224 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: global_store_dwordx4 
v64, v[60:63], s[2:3] offset:240 ; GFX10-NEXT: s_endpgm ; ; GFX11-LABEL: v_insert_v64i32_37: @@ -109,15 +106,15 @@ ; GFX11-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-NEXT: s_clause 0xf ; GFX11-NEXT: global_load_b128 v[0:3], v64, s[0:1] -; GFX11-NEXT: global_load_b128 v[8:11], v64, s[0:1] offset:16 -; GFX11-NEXT: global_load_b128 v[12:15], v64, s[0:1] offset:32 -; GFX11-NEXT: global_load_b128 v[16:19], v64, s[0:1] offset:48 -; GFX11-NEXT: global_load_b128 v[20:23], v64, s[0:1] offset:64 -; GFX11-NEXT: global_load_b128 v[24:27], v64, s[0:1] offset:80 -; GFX11-NEXT: global_load_b128 v[28:31], v64, s[0:1] offset:96 -; GFX11-NEXT: global_load_b128 v[32:35], v64, s[0:1] offset:112 -; GFX11-NEXT: global_load_b128 v[36:39], v64, s[0:1] offset:128 -; GFX11-NEXT: global_load_b128 v[4:7], v64, s[0:1] offset:144 +; GFX11-NEXT: global_load_b128 v[4:7], v64, s[0:1] offset:16 +; GFX11-NEXT: global_load_b128 v[8:11], v64, s[0:1] offset:32 +; GFX11-NEXT: global_load_b128 v[12:15], v64, s[0:1] offset:48 +; GFX11-NEXT: global_load_b128 v[16:19], v64, s[0:1] offset:64 +; GFX11-NEXT: global_load_b128 v[20:23], v64, s[0:1] offset:80 +; GFX11-NEXT: global_load_b128 v[24:27], v64, s[0:1] offset:96 +; GFX11-NEXT: global_load_b128 v[28:31], v64, s[0:1] offset:112 +; GFX11-NEXT: global_load_b128 v[32:35], v64, s[0:1] offset:128 +; GFX11-NEXT: global_load_b128 v[36:39], v64, s[0:1] offset:144 ; GFX11-NEXT: global_load_b128 v[40:43], v64, s[0:1] offset:160 ; GFX11-NEXT: global_load_b128 v[44:47], v64, s[0:1] offset:176 ; GFX11-NEXT: global_load_b128 v[48:51], v64, s[0:1] offset:192 @@ -125,18 +122,18 @@ ; GFX11-NEXT: global_load_b128 v[56:59], v64, s[0:1] offset:224 ; GFX11-NEXT: global_load_b128 v[60:63], v64, s[0:1] offset:240 ; GFX11-NEXT: s_waitcnt vmcnt(6) -; GFX11-NEXT: v_mov_b32_e32 v5, 0x3e7 +; GFX11-NEXT: v_mov_b32_e32 v37, 0x3e7 ; GFX11-NEXT: s_clause 0x9 ; GFX11-NEXT: global_store_b128 v64, v[0:3], s[2:3] -; GFX11-NEXT: global_store_b128 v64, v[8:11], s[2:3] offset:16 -; GFX11-NEXT: global_store_b128 v64, v[12:15], s[2:3] offset:32 -; GFX11-NEXT: global_store_b128 v64, v[16:19], s[2:3] offset:48 -; GFX11-NEXT: global_store_b128 v64, v[20:23], s[2:3] offset:64 -; GFX11-NEXT: global_store_b128 v64, v[24:27], s[2:3] offset:80 -; GFX11-NEXT: global_store_b128 v64, v[28:31], s[2:3] offset:96 -; GFX11-NEXT: global_store_b128 v64, v[32:35], s[2:3] offset:112 -; GFX11-NEXT: global_store_b128 v64, v[36:39], s[2:3] offset:128 -; GFX11-NEXT: global_store_b128 v64, v[4:7], s[2:3] offset:144 +; GFX11-NEXT: global_store_b128 v64, v[4:7], s[2:3] offset:16 +; GFX11-NEXT: global_store_b128 v64, v[8:11], s[2:3] offset:32 +; GFX11-NEXT: global_store_b128 v64, v[12:15], s[2:3] offset:48 +; GFX11-NEXT: global_store_b128 v64, v[16:19], s[2:3] offset:64 +; GFX11-NEXT: global_store_b128 v64, v[20:23], s[2:3] offset:80 +; GFX11-NEXT: global_store_b128 v64, v[24:27], s[2:3] offset:96 +; GFX11-NEXT: global_store_b128 v64, v[28:31], s[2:3] offset:112 +; GFX11-NEXT: global_store_b128 v64, v[32:35], s[2:3] offset:128 +; GFX11-NEXT: global_store_b128 v64, v[36:39], s[2:3] offset:144 ; GFX11-NEXT: s_waitcnt vmcnt(5) ; GFX11-NEXT: global_store_b128 v64, v[40:43], s[2:3] offset:160 ; GFX11-NEXT: s_waitcnt vmcnt(4) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll @@ -888,6 +888,7 @@ ; GFX10-NEXT: s_mov_b64 s[6:7], 2.0 ; GFX10-NEXT: v_mov_b32_e32 v3, s4 ; 
GFX10-NEXT: v_mov_b32_e32 v4, s5 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2 ; GFX10-NEXT: v_mov_b32_e32 v5, s6 ; GFX10-NEXT: v_mov_b32_e32 v6, s7 ; GFX10-NEXT: v_mov_b32_e32 v7, s8 @@ -902,30 +903,29 @@ ; GFX10-NEXT: v_mov_b32_e32 v16, s17 ; GFX10-NEXT: v_mov_b32_e32 v17, s18 ; GFX10-NEXT: v_mov_b32_e32 v18, s19 -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2 ; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 1, v2 -; GFX10-NEXT: v_cmp_eq_u32_e64 s5, 3, v2 -; GFX10-NEXT: v_cmp_eq_u32_e64 s10, 2, v2 -; GFX10-NEXT: v_cmp_eq_u32_e64 s6, 4, v2 -; GFX10-NEXT: v_cmp_eq_u32_e64 s7, 5, v2 -; GFX10-NEXT: v_cmp_eq_u32_e64 s8, 6, v2 -; GFX10-NEXT: v_cmp_eq_u32_e64 s9, 7, v2 ; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v0, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v0, s4 ; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v1, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v2 +; GFX10-NEXT: v_cmp_eq_u32_e64 s5, 7, v2 +; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v0, s4 ; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, v1, s4 -; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, v0, s10 -; GFX10-NEXT: v_cndmask_b32_e64 v9, v9, v0, s5 -; GFX10-NEXT: v_cndmask_b32_e64 v8, v8, v1, s10 -; GFX10-NEXT: v_cndmask_b32_e64 v10, v10, v1, s5 -; GFX10-NEXT: v_cndmask_b32_e64 v11, v11, v0, s6 -; GFX10-NEXT: v_cndmask_b32_e64 v13, v13, v0, s7 -; GFX10-NEXT: v_cndmask_b32_e64 v12, v12, v1, s6 -; GFX10-NEXT: v_cndmask_b32_e64 v14, v14, v1, s7 -; GFX10-NEXT: v_cndmask_b32_e64 v15, v15, v0, s8 -; GFX10-NEXT: v_cndmask_b32_e64 v17, v17, v0, s9 -; GFX10-NEXT: v_cndmask_b32_e64 v16, v16, v1, s8 -; GFX10-NEXT: v_cndmask_b32_e64 v18, v18, v1, s9 +; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 3, v2 +; GFX10-NEXT: v_cndmask_b32_e32 v7, v7, v0, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v8, v8, v1, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v2 +; GFX10-NEXT: v_cndmask_b32_e64 v17, v17, v0, s5 +; GFX10-NEXT: v_cndmask_b32_e64 v9, v9, v0, s4 +; GFX10-NEXT: v_cndmask_b32_e64 v10, v10, v1, s4 +; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 5, v2 +; GFX10-NEXT: v_cndmask_b32_e32 v11, v11, v0, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v12, v12, v1, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v2 +; GFX10-NEXT: v_cndmask_b32_e64 v18, v18, v1, s5 +; GFX10-NEXT: v_cndmask_b32_e64 v13, v13, v0, s4 +; GFX10-NEXT: v_cndmask_b32_e64 v14, v14, v1, s4 +; GFX10-NEXT: v_cndmask_b32_e32 v15, v15, v0, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v16, v16, v1, vcc_lo ; GFX10-NEXT: global_store_dwordx4 v[0:1], v[3:6], off ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_store_dwordx4 v[0:1], v[7:10], off @@ -1112,26 +1112,26 @@ ; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 1, v0 ; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s18, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s19, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v0 -; GFX10-NEXT: v_cmp_eq_u32_e64 s1, 2, v0 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v0 +; GFX10-NEXT: v_cmp_eq_u32_e64 s1, 7, v0 ; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s18, s0 ; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, s19, s0 -; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 4, v0 -; GFX10-NEXT: v_cmp_eq_u32_e64 s2, 5, v0 -; GFX10-NEXT: v_cmp_eq_u32_e64 s3, 6, v0 -; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 7, v0 -; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, s18, s1 -; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, s19, s1 -; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, s18, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v8, v8, s19, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v9, v9, s18, s0 -; GFX10-NEXT: v_cndmask_b32_e64 v10, v10, s19, s0 -; GFX10-NEXT: v_cndmask_b32_e64 v11, v11, s18, s2 -; GFX10-NEXT: v_cndmask_b32_e64 v12, v12, s19, s2 -; 
GFX10-NEXT: v_cndmask_b32_e64 v13, v13, s18, s3 -; GFX10-NEXT: v_cndmask_b32_e64 v14, v14, s19, s3 -; GFX10-NEXT: v_cndmask_b32_e64 v15, v15, s18, s4 -; GFX10-NEXT: v_cndmask_b32_e64 v16, v16, s19, s4 +; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 3, v0 +; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, s18, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, s19, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v0 +; GFX10-NEXT: v_cndmask_b32_e64 v15, v15, s18, s1 +; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, s18, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v8, v8, s19, s0 +; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 5, v0 +; GFX10-NEXT: v_cndmask_b32_e64 v9, v9, s18, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v10, v10, s19, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v0 +; GFX10-NEXT: v_cndmask_b32_e64 v16, v16, s19, s1 +; GFX10-NEXT: v_cndmask_b32_e64 v11, v11, s18, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v12, v12, s19, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v13, v13, s18, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v14, v14, s19, vcc_lo ; GFX10-NEXT: global_store_dwordx4 v[0:1], v[1:4], off ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_store_dwordx4 v[0:1], v[5:8], off @@ -1513,6 +1513,9 @@ ; GFX10-NEXT: s_mov_b32 s12, s14 ; GFX10-NEXT: s_mov_b32 s14, s16 ; GFX10-NEXT: v_mov_b32_e32 v18, s15 +; GFX10-NEXT: v_mov_b32_e32 v4, s1 +; GFX10-NEXT: v_mov_b32_e32 v3, s0 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2 ; GFX10-NEXT: v_mov_b32_e32 v17, s14 ; GFX10-NEXT: v_mov_b32_e32 v16, s13 ; GFX10-NEXT: v_mov_b32_e32 v15, s12 @@ -1526,32 +1529,29 @@ ; GFX10-NEXT: v_mov_b32_e32 v7, s4 ; GFX10-NEXT: v_mov_b32_e32 v6, s3 ; GFX10-NEXT: v_mov_b32_e32 v5, s2 -; GFX10-NEXT: v_mov_b32_e32 v4, s1 -; GFX10-NEXT: v_mov_b32_e32 v3, s0 -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2 ; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 1, v2 -; GFX10-NEXT: v_cmp_eq_u32_e64 s1, 3, v2 -; GFX10-NEXT: v_cmp_eq_u32_e64 s6, 2, v2 -; GFX10-NEXT: v_cmp_eq_u32_e64 s2, 4, v2 -; GFX10-NEXT: v_cmp_eq_u32_e64 s3, 5, v2 -; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 6, v2 -; GFX10-NEXT: v_cmp_eq_u32_e64 s5, 7, v2 ; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v0, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v0, s0 ; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v1, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v2 +; GFX10-NEXT: v_cmp_eq_u32_e64 s1, 7, v2 +; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v0, s0 ; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, v1, s0 -; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, v0, s6 -; GFX10-NEXT: v_cndmask_b32_e64 v9, v9, v0, s1 -; GFX10-NEXT: v_cndmask_b32_e64 v8, v8, v1, s6 -; GFX10-NEXT: v_cndmask_b32_e64 v10, v10, v1, s1 -; GFX10-NEXT: v_cndmask_b32_e64 v11, v11, v0, s2 -; GFX10-NEXT: v_cndmask_b32_e64 v13, v13, v0, s3 -; GFX10-NEXT: v_cndmask_b32_e64 v12, v12, v1, s2 -; GFX10-NEXT: v_cndmask_b32_e64 v14, v14, v1, s3 -; GFX10-NEXT: v_cndmask_b32_e64 v15, v15, v0, s4 -; GFX10-NEXT: v_cndmask_b32_e64 v17, v17, v0, s5 -; GFX10-NEXT: v_cndmask_b32_e64 v16, v16, v1, s4 -; GFX10-NEXT: v_cndmask_b32_e64 v18, v18, v1, s5 +; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 3, v2 +; GFX10-NEXT: v_cndmask_b32_e32 v7, v7, v0, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v8, v8, v1, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v2 +; GFX10-NEXT: v_cndmask_b32_e64 v17, v17, v0, s1 +; GFX10-NEXT: v_cndmask_b32_e64 v9, v9, v0, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v10, v10, v1, s0 +; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 5, v2 +; GFX10-NEXT: v_cndmask_b32_e32 v11, v11, v0, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v12, v12, v1, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v2 +; GFX10-NEXT: v_cndmask_b32_e64 v18, v18, 
v1, s1 +; GFX10-NEXT: v_cndmask_b32_e64 v13, v13, v0, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v14, v14, v1, s0 +; GFX10-NEXT: v_cndmask_b32_e32 v15, v15, v0, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v16, v16, v1, vcc_lo ; GFX10-NEXT: global_store_dwordx4 v[0:1], v[3:6], off ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_store_dwordx4 v[0:1], v[7:10], off @@ -1635,31 +1635,31 @@ ; GPRIDX-LABEL: dyn_insertelement_v8f64_v_s_v: ; GPRIDX: ; %bb.0: ; %entry ; GPRIDX-NEXT: v_mov_b32_e32 v17, s2 +; GPRIDX-NEXT: v_mov_b32_e32 v18, s3 ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v16 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[0:1], 1, v16 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[14:15], 2, v16 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[4:5], 3, v16 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[6:7], 4, v16 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[8:9], 5, v16 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[10:11], 7, v16 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[12:13], 6, v16 -; GPRIDX-NEXT: v_mov_b32_e32 v16, s3 ; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v17, vcc -; GPRIDX-NEXT: v_cndmask_b32_e64 v2, v2, v17, s[0:1] -; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v16, vcc -; GPRIDX-NEXT: v_cndmask_b32_e64 v3, v3, v16, s[0:1] -; GPRIDX-NEXT: v_cndmask_b32_e64 v4, v4, v17, s[14:15] -; GPRIDX-NEXT: v_cndmask_b32_e64 v6, v6, v17, s[4:5] -; GPRIDX-NEXT: v_cndmask_b32_e64 v8, v8, v17, s[6:7] -; GPRIDX-NEXT: v_cndmask_b32_e64 v10, v10, v17, s[8:9] -; GPRIDX-NEXT: v_cndmask_b32_e64 v12, v12, v17, s[12:13] -; GPRIDX-NEXT: v_cndmask_b32_e64 v14, v14, v17, s[10:11] -; GPRIDX-NEXT: v_cndmask_b32_e64 v5, v5, v16, s[14:15] -; GPRIDX-NEXT: v_cndmask_b32_e64 v7, v7, v16, s[4:5] -; GPRIDX-NEXT: v_cndmask_b32_e64 v9, v9, v16, s[6:7] -; GPRIDX-NEXT: v_cndmask_b32_e64 v11, v11, v16, s[8:9] -; GPRIDX-NEXT: v_cndmask_b32_e64 v13, v13, v16, s[12:13] -; GPRIDX-NEXT: v_cndmask_b32_e64 v15, v15, v16, s[10:11] +; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v18, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v16 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v17, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v16 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v17, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v18, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v16 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v17, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v18, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v16 +; GPRIDX-NEXT: v_cndmask_b32_e32 v8, v8, v17, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v9, v9, v18, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v16 +; GPRIDX-NEXT: v_cndmask_b32_e32 v10, v10, v17, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v11, v11, v18, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v16 +; GPRIDX-NEXT: v_cndmask_b32_e32 v12, v12, v17, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v13, v13, v18, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 7, v16 +; GPRIDX-NEXT: v_cndmask_b32_e32 v14, v14, v17, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v15, v15, v18, vcc ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[0:3], off ; GPRIDX-NEXT: s_waitcnt vmcnt(0) ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[4:7], off @@ -1673,29 +1673,29 @@ ; GFX10-LABEL: dyn_insertelement_v8f64_v_s_v: ; GFX10: ; %bb.0: ; %entry ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v16 +; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 1, v16 +; GFX10-NEXT: v_cmp_eq_u32_e64 s1, 7, v16 ; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s2, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s3, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v16 -; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s2, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, 
s3, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v16 +; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s2, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s3, s0 +; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 3, v16 +; GFX10-NEXT: v_cndmask_b32_e64 v14, v14, s2, s1 ; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, s2, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, s3, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v16 -; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, s2, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, s3, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v16 +; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, s2, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, s3, s0 +; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 5, v16 +; GFX10-NEXT: v_cndmask_b32_e64 v15, v15, s3, s1 ; GFX10-NEXT: v_cndmask_b32_e64 v8, v8, s2, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v9, v9, s3, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v16 -; GFX10-NEXT: v_cndmask_b32_e64 v10, v10, s2, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v11, v11, s3, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v16 +; GFX10-NEXT: v_cndmask_b32_e64 v10, v10, s2, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v11, v11, s3, s0 ; GFX10-NEXT: v_cndmask_b32_e64 v12, v12, s2, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v13, v13, s3, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v16 -; GFX10-NEXT: v_cndmask_b32_e64 v14, v14, s2, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v15, v15, s3, vcc_lo ; GFX10-NEXT: global_store_dwordx4 v[0:1], v[0:3], off ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_store_dwordx4 v[0:1], v[4:7], off @@ -1709,29 +1709,29 @@ ; GFX11-LABEL: dyn_insertelement_v8f64_v_s_v: ; GFX11: ; %bb.0: ; %entry ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v16 +; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 1, v16 +; GFX11-NEXT: v_cmp_eq_u32_e64 s1, 7, v16 ; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s2, vcc_lo ; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s3, vcc_lo -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v16 -; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s2, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, s3, vcc_lo ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v16 +; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s2, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, s3, s0 +; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 3, v16 +; GFX11-NEXT: v_cndmask_b32_e64 v14, v14, s2, s1 ; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, s2, vcc_lo ; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, s3, vcc_lo -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v16 -; GFX11-NEXT: v_cndmask_b32_e64 v6, v6, s2, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v7, v7, s3, vcc_lo ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v16 +; GFX11-NEXT: v_cndmask_b32_e64 v6, v6, s2, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v7, v7, s3, s0 +; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 5, v16 +; GFX11-NEXT: v_cndmask_b32_e64 v15, v15, s3, s1 ; GFX11-NEXT: v_cndmask_b32_e64 v8, v8, s2, vcc_lo ; GFX11-NEXT: v_cndmask_b32_e64 v9, v9, s3, vcc_lo -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 5, v16 -; GFX11-NEXT: v_cndmask_b32_e64 v10, v10, s2, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v11, v11, s3, vcc_lo ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v16 +; GFX11-NEXT: v_cndmask_b32_e64 v10, v10, s2, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v11, v11, s3, s0 ; GFX11-NEXT: v_cndmask_b32_e64 v12, v12, s2, vcc_lo ; GFX11-NEXT: v_cndmask_b32_e64 v13, v13, s3, vcc_lo -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 7, v16 -; GFX11-NEXT: v_cndmask_b32_e64 v14, v14, s2, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v15, v15, s3, vcc_lo ; GFX11-NEXT: global_store_b128 v[0:1], v[0:3], off dlc ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: global_store_b128 v[0:1], 
v[4:7], off dlc @@ -1820,29 +1820,29 @@ ; GPRIDX-LABEL: dyn_insertelement_v8f64_v_v_v: ; GPRIDX: ; %bb.0: ; %entry ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v18 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[0:1], 1, v18 ; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v16, vcc -; GPRIDX-NEXT: v_cndmask_b32_e64 v2, v2, v16, s[0:1] -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[2:3], 2, v18 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[4:5], 3, v18 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[6:7], 4, v18 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[8:9], 5, v18 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[10:11], 7, v18 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[12:13], 6, v18 ; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc -; GPRIDX-NEXT: v_cndmask_b32_e64 v3, v3, v17, s[0:1] -; GPRIDX-NEXT: v_cndmask_b32_e64 v4, v4, v16, s[2:3] -; GPRIDX-NEXT: v_cndmask_b32_e64 v6, v6, v16, s[4:5] -; GPRIDX-NEXT: v_cndmask_b32_e64 v8, v8, v16, s[6:7] -; GPRIDX-NEXT: v_cndmask_b32_e64 v10, v10, v16, s[8:9] -; GPRIDX-NEXT: v_cndmask_b32_e64 v12, v12, v16, s[12:13] -; GPRIDX-NEXT: v_cndmask_b32_e64 v14, v14, v16, s[10:11] -; GPRIDX-NEXT: v_cndmask_b32_e64 v5, v5, v17, s[2:3] -; GPRIDX-NEXT: v_cndmask_b32_e64 v7, v7, v17, s[4:5] -; GPRIDX-NEXT: v_cndmask_b32_e64 v9, v9, v17, s[6:7] -; GPRIDX-NEXT: v_cndmask_b32_e64 v11, v11, v17, s[8:9] -; GPRIDX-NEXT: v_cndmask_b32_e64 v13, v13, v17, s[12:13] -; GPRIDX-NEXT: v_cndmask_b32_e64 v15, v15, v17, s[10:11] +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v18 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v18 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v18 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v16, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v17, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v18 +; GPRIDX-NEXT: v_cndmask_b32_e32 v8, v8, v16, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v9, v9, v17, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v18 +; GPRIDX-NEXT: v_cndmask_b32_e32 v10, v10, v16, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v11, v11, v17, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v18 +; GPRIDX-NEXT: v_cndmask_b32_e32 v12, v12, v16, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v13, v13, v17, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 7, v18 +; GPRIDX-NEXT: v_cndmask_b32_e32 v14, v14, v16, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v15, v15, v17, vcc ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[0:3], off ; GPRIDX-NEXT: s_waitcnt vmcnt(0) ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[4:7], off @@ -1857,28 +1857,28 @@ ; GFX10: ; %bb.0: ; %entry ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v18 ; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 1, v18 -; GFX10-NEXT: v_cmp_eq_u32_e64 s1, 2, v18 -; GFX10-NEXT: v_cmp_eq_u32_e64 s2, 3, v18 -; GFX10-NEXT: v_cmp_eq_u32_e64 s3, 4, v18 -; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 5, v18 -; GFX10-NEXT: v_cmp_eq_u32_e64 s5, 7, v18 -; GFX10-NEXT: v_cmp_eq_u32_e64 s6, 6, v18 +; GFX10-NEXT: v_cmp_eq_u32_e64 s1, 7, v18 ; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v16, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, v16, s0 ; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v18 +; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, v16, s0 ; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, v17, s0 -; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, v16, s1 -; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, v16, s2 -; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v17, s1 -; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, v17, s2 -; GFX10-NEXT: v_cndmask_b32_e64 v8, v8, v16, s3 -; 
GFX10-NEXT: v_cndmask_b32_e64 v10, v10, v16, s4 -; GFX10-NEXT: v_cndmask_b32_e64 v9, v9, v17, s3 -; GFX10-NEXT: v_cndmask_b32_e64 v11, v11, v17, s4 -; GFX10-NEXT: v_cndmask_b32_e64 v12, v12, v16, s6 -; GFX10-NEXT: v_cndmask_b32_e64 v14, v14, v16, s5 -; GFX10-NEXT: v_cndmask_b32_e64 v13, v13, v17, s6 -; GFX10-NEXT: v_cndmask_b32_e64 v15, v15, v17, s5 +; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 3, v18 +; GFX10-NEXT: v_cndmask_b32_e64 v14, v14, v16, s1 +; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v18 +; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, v16, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, v17, s0 +; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 5, v18 +; GFX10-NEXT: v_cndmask_b32_e64 v15, v15, v17, s1 +; GFX10-NEXT: v_cndmask_b32_e32 v8, v8, v16, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v9, v9, v17, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v18 +; GFX10-NEXT: v_cndmask_b32_e64 v10, v10, v16, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v11, v11, v17, s0 +; GFX10-NEXT: v_cndmask_b32_e32 v12, v12, v16, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v13, v13, v17, vcc_lo ; GFX10-NEXT: global_store_dwordx4 v[0:1], v[0:3], off ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_store_dwordx4 v[0:1], v[4:7], off @@ -1893,27 +1893,24 @@ ; GFX11: ; %bb.0: ; %entry ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v18 ; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 1, v18 -; GFX11-NEXT: v_cmp_eq_u32_e64 s1, 2, v18 -; GFX11-NEXT: v_cmp_eq_u32_e64 s2, 3, v18 -; GFX11-NEXT: v_cmp_eq_u32_e64 s3, 4, v18 -; GFX11-NEXT: v_cmp_eq_u32_e64 s4, 5, v18 -; GFX11-NEXT: v_cmp_eq_u32_e64 s5, 7, v18 -; GFX11-NEXT: v_cmp_eq_u32_e64 s6, 6, v18 +; GFX11-NEXT: v_cmp_eq_u32_e64 s1, 7, v18 ; GFX11-NEXT: v_dual_cndmask_b32 v0, v0, v16 :: v_dual_cndmask_b32 v1, v1, v17 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v18 ; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, v16, s0 ; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, v17, s0 -; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, v16, s1 -; GFX11-NEXT: v_cndmask_b32_e64 v6, v6, v16, s2 -; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, v17, s1 -; GFX11-NEXT: v_cndmask_b32_e64 v7, v7, v17, s2 -; GFX11-NEXT: v_cndmask_b32_e64 v8, v8, v16, s3 -; GFX11-NEXT: v_cndmask_b32_e64 v10, v10, v16, s4 -; GFX11-NEXT: v_cndmask_b32_e64 v9, v9, v17, s3 -; GFX11-NEXT: v_cndmask_b32_e64 v11, v11, v17, s4 -; GFX11-NEXT: v_cndmask_b32_e64 v12, v12, v16, s6 -; GFX11-NEXT: v_cndmask_b32_e64 v14, v14, v16, s5 -; GFX11-NEXT: v_cndmask_b32_e64 v13, v13, v17, s6 -; GFX11-NEXT: v_cndmask_b32_e64 v15, v15, v17, s5 +; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 3, v18 +; GFX11-NEXT: v_cndmask_b32_e64 v14, v14, v16, s1 +; GFX11-NEXT: v_dual_cndmask_b32 v4, v4, v16 :: v_dual_cndmask_b32 v5, v5, v17 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v18 +; GFX11-NEXT: v_cndmask_b32_e64 v6, v6, v16, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v7, v7, v17, s0 +; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 5, v18 +; GFX11-NEXT: v_cndmask_b32_e64 v15, v15, v17, s1 +; GFX11-NEXT: v_dual_cndmask_b32 v8, v8, v16 :: v_dual_cndmask_b32 v9, v9, v17 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v18 +; GFX11-NEXT: v_cndmask_b32_e64 v10, v10, v16, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v11, v11, v17, s0 +; GFX11-NEXT: v_dual_cndmask_b32 v12, v12, v16 :: v_dual_cndmask_b32 v13, v13, v17 ; GFX11-NEXT: global_store_b128 v[0:1], v[0:3], off dlc ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: global_store_b128 v[0:1], v[4:7], off dlc @@ -2550,29 +2547,29 @@ ; GPRIDX: ; %bb.0: ; %entry ; GPRIDX-NEXT: v_add_u32_e32 v18, 1, v18 ; GPRIDX-NEXT: 
v_cmp_eq_u32_e32 vcc, 0, v18 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[0:1], 1, v18 ; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v16, vcc -; GPRIDX-NEXT: v_cndmask_b32_e64 v2, v2, v16, s[0:1] -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[2:3], 2, v18 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[4:5], 3, v18 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[6:7], 4, v18 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[8:9], 5, v18 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[10:11], 7, v18 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[12:13], 6, v18 ; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc -; GPRIDX-NEXT: v_cndmask_b32_e64 v3, v3, v17, s[0:1] -; GPRIDX-NEXT: v_cndmask_b32_e64 v4, v4, v16, s[2:3] -; GPRIDX-NEXT: v_cndmask_b32_e64 v6, v6, v16, s[4:5] -; GPRIDX-NEXT: v_cndmask_b32_e64 v8, v8, v16, s[6:7] -; GPRIDX-NEXT: v_cndmask_b32_e64 v10, v10, v16, s[8:9] -; GPRIDX-NEXT: v_cndmask_b32_e64 v12, v12, v16, s[12:13] -; GPRIDX-NEXT: v_cndmask_b32_e64 v14, v14, v16, s[10:11] -; GPRIDX-NEXT: v_cndmask_b32_e64 v5, v5, v17, s[2:3] -; GPRIDX-NEXT: v_cndmask_b32_e64 v7, v7, v17, s[4:5] -; GPRIDX-NEXT: v_cndmask_b32_e64 v9, v9, v17, s[6:7] -; GPRIDX-NEXT: v_cndmask_b32_e64 v11, v11, v17, s[8:9] -; GPRIDX-NEXT: v_cndmask_b32_e64 v13, v13, v17, s[12:13] -; GPRIDX-NEXT: v_cndmask_b32_e64 v15, v15, v17, s[10:11] +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v18 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v18 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v18 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v16, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v17, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v18 +; GPRIDX-NEXT: v_cndmask_b32_e32 v8, v8, v16, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v9, v9, v17, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v18 +; GPRIDX-NEXT: v_cndmask_b32_e32 v10, v10, v16, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v11, v11, v17, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v18 +; GPRIDX-NEXT: v_cndmask_b32_e32 v12, v12, v16, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v13, v13, v17, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 7, v18 +; GPRIDX-NEXT: v_cndmask_b32_e32 v14, v14, v16, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v15, v15, v17, vcc ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[0:3], off ; GPRIDX-NEXT: s_waitcnt vmcnt(0) ; GPRIDX-NEXT: global_store_dwordx4 v[0:1], v[4:7], off @@ -2588,28 +2585,28 @@ ; GFX10-NEXT: v_add_nc_u32_e32 v18, 1, v18 ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v18 ; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 1, v18 -; GFX10-NEXT: v_cmp_eq_u32_e64 s1, 2, v18 -; GFX10-NEXT: v_cmp_eq_u32_e64 s2, 3, v18 -; GFX10-NEXT: v_cmp_eq_u32_e64 s3, 4, v18 -; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 5, v18 -; GFX10-NEXT: v_cmp_eq_u32_e64 s5, 7, v18 -; GFX10-NEXT: v_cmp_eq_u32_e64 s6, 6, v18 +; GFX10-NEXT: v_cmp_eq_u32_e64 s1, 7, v18 ; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v16, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, v16, s0 ; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v18 +; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, v16, s0 ; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, v17, s0 -; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, v16, s1 -; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, v16, s2 -; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v17, s1 -; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, v17, s2 -; GFX10-NEXT: v_cndmask_b32_e64 v8, v8, v16, s3 -; GFX10-NEXT: v_cndmask_b32_e64 v10, v10, v16, s4 -; GFX10-NEXT: v_cndmask_b32_e64 v9, v9, v17, s3 -; GFX10-NEXT: 
v_cndmask_b32_e64 v11, v11, v17, s4 -; GFX10-NEXT: v_cndmask_b32_e64 v12, v12, v16, s6 -; GFX10-NEXT: v_cndmask_b32_e64 v14, v14, v16, s5 -; GFX10-NEXT: v_cndmask_b32_e64 v13, v13, v17, s6 -; GFX10-NEXT: v_cndmask_b32_e64 v15, v15, v17, s5 +; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 3, v18 +; GFX10-NEXT: v_cndmask_b32_e64 v14, v14, v16, s1 +; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v18 +; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, v16, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, v17, s0 +; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 5, v18 +; GFX10-NEXT: v_cndmask_b32_e64 v15, v15, v17, s1 +; GFX10-NEXT: v_cndmask_b32_e32 v8, v8, v16, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v9, v9, v17, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v18 +; GFX10-NEXT: v_cndmask_b32_e64 v10, v10, v16, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v11, v11, v17, s0 +; GFX10-NEXT: v_cndmask_b32_e32 v12, v12, v16, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v13, v13, v17, vcc_lo ; GFX10-NEXT: global_store_dwordx4 v[0:1], v[0:3], off ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_store_dwordx4 v[0:1], v[4:7], off @@ -2626,27 +2623,26 @@ ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v18 ; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc_lo ; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 1, v18 -; GFX11-NEXT: v_cmp_eq_u32_e64 s1, 2, v18 -; GFX11-NEXT: v_cmp_eq_u32_e64 s2, 3, v18 -; GFX11-NEXT: v_cmp_eq_u32_e64 s3, 4, v18 -; GFX11-NEXT: v_cmp_eq_u32_e64 s4, 5, v18 -; GFX11-NEXT: v_cmp_eq_u32_e64 s5, 7, v18 -; GFX11-NEXT: v_cmp_eq_u32_e64 s6, 6, v18 ; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v16, vcc_lo +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v18 +; GFX11-NEXT: v_cmp_eq_u32_e64 s1, 7, v18 ; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, v16, s0 ; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, v17, s0 -; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, v16, s1 -; GFX11-NEXT: v_cndmask_b32_e64 v6, v6, v16, s2 -; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, v17, s1 -; GFX11-NEXT: v_cndmask_b32_e64 v7, v7, v17, s2 -; GFX11-NEXT: v_cndmask_b32_e64 v8, v8, v16, s3 -; GFX11-NEXT: v_cndmask_b32_e64 v10, v10, v16, s4 -; GFX11-NEXT: v_cndmask_b32_e64 v9, v9, v17, s3 -; GFX11-NEXT: v_cndmask_b32_e64 v11, v11, v17, s4 -; GFX11-NEXT: v_cndmask_b32_e64 v12, v12, v16, s6 -; GFX11-NEXT: v_cndmask_b32_e64 v14, v14, v16, s5 -; GFX11-NEXT: v_cndmask_b32_e64 v13, v13, v17, s6 -; GFX11-NEXT: v_cndmask_b32_e64 v15, v15, v17, s5 +; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc_lo +; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 3, v18 +; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc_lo +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v18 +; GFX11-NEXT: v_cndmask_b32_e64 v14, v14, v16, s1 +; GFX11-NEXT: v_cndmask_b32_e64 v15, v15, v17, s1 +; GFX11-NEXT: v_cndmask_b32_e64 v6, v6, v16, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v7, v7, v17, s0 +; GFX11-NEXT: v_cndmask_b32_e32 v9, v9, v17, vcc_lo +; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 5, v18 +; GFX11-NEXT: v_cndmask_b32_e32 v8, v8, v16, vcc_lo +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 6, v18 +; GFX11-NEXT: v_cndmask_b32_e64 v10, v10, v16, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v11, v11, v17, s0 +; GFX11-NEXT: v_dual_cndmask_b32 v13, v13, v17 :: v_dual_cndmask_b32 v12, v12, v16 ; GFX11-NEXT: global_store_b128 v[0:1], v[0:3], off dlc ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: global_store_b128 v[0:1], v[4:7], off dlc @@ -6299,26 +6295,26 @@ ; GPRIDX-LABEL: dyn_insertelement_v7f64_v_v_v: ; GPRIDX: ; %bb.0: ; %entry ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v16 -; GPRIDX-NEXT: 
v_cmp_eq_u32_e64 s[0:1], 1, v16 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[2:3], 2, v16 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[4:5], 3, v16 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[6:7], 4, v16 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[8:9], 5, v16 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[10:11], 6, v16 ; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v14, vcc -; GPRIDX-NEXT: v_cndmask_b32_e64 v2, v2, v14, s[0:1] -; GPRIDX-NEXT: v_cndmask_b32_e64 v4, v4, v14, s[2:3] -; GPRIDX-NEXT: v_cndmask_b32_e64 v6, v6, v14, s[4:5] -; GPRIDX-NEXT: v_cndmask_b32_e64 v8, v8, v14, s[6:7] -; GPRIDX-NEXT: v_cndmask_b32_e64 v10, v10, v14, s[8:9] -; GPRIDX-NEXT: v_cndmask_b32_e64 v12, v12, v14, s[10:11] ; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v15, vcc -; GPRIDX-NEXT: v_cndmask_b32_e64 v3, v3, v15, s[0:1] -; GPRIDX-NEXT: v_cndmask_b32_e64 v5, v5, v15, s[2:3] -; GPRIDX-NEXT: v_cndmask_b32_e64 v7, v7, v15, s[4:5] -; GPRIDX-NEXT: v_cndmask_b32_e64 v9, v9, v15, s[6:7] -; GPRIDX-NEXT: v_cndmask_b32_e64 v11, v11, v15, s[8:9] -; GPRIDX-NEXT: v_cndmask_b32_e64 v13, v13, v15, s[10:11] +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v16 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v14, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v15, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v16 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v14, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v15, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v16 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v14, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v15, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v16 +; GPRIDX-NEXT: v_cndmask_b32_e32 v8, v8, v14, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v9, v9, v15, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 5, v16 +; GPRIDX-NEXT: v_cndmask_b32_e32 v10, v10, v14, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v11, v11, v15, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 6, v16 +; GPRIDX-NEXT: v_cndmask_b32_e32 v12, v12, v14, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v13, v13, v15, vcc ; GPRIDX-NEXT: v_readfirstlane_b32 s0, v0 ; GPRIDX-NEXT: v_readfirstlane_b32 s1, v1 ; GPRIDX-NEXT: v_readfirstlane_b32 s2, v2 @@ -6339,27 +6335,27 @@ ; GFX10: ; %bb.0: ; %entry ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v16 ; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 1, v16 -; GFX10-NEXT: v_cmp_eq_u32_e64 s1, 2, v16 -; GFX10-NEXT: v_cmp_eq_u32_e64 s2, 3, v16 -; GFX10-NEXT: v_cmp_eq_u32_e64 s3, 4, v16 -; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 5, v16 -; GFX10-NEXT: v_cmp_eq_u32_e64 s5, 6, v16 +; GFX10-NEXT: v_cmp_eq_u32_e64 s1, 6, v16 ; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v14, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, v14, s0 -; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, v14, s1 -; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, v14, s2 -; GFX10-NEXT: v_cndmask_b32_e64 v8, v8, v14, s3 -; GFX10-NEXT: v_cndmask_b32_e64 v10, v10, v14, s4 -; GFX10-NEXT: v_cndmask_b32_e64 v12, v12, v14, s5 ; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v15, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v16 +; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, v14, s0 ; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, v15, s0 -; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v15, s1 -; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, v15, s2 -; GFX10-NEXT: v_cndmask_b32_e64 v9, v9, v15, s3 -; GFX10-NEXT: v_cndmask_b32_e64 v11, v11, v15, s4 -; GFX10-NEXT: v_cndmask_b32_e64 v13, v13, v15, s5 -; GFX10-NEXT: v_readfirstlane_b32 s0, v0 +; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 3, v16 +; GFX10-NEXT: v_cndmask_b32_e64 v12, v12, v14, s1 +; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v14, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v5, v5, v15, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v16 +; GFX10-NEXT: 
v_cndmask_b32_e64 v6, v6, v14, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, v15, s0 +; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 5, v16 +; GFX10-NEXT: v_cndmask_b32_e64 v13, v13, v15, s1 +; GFX10-NEXT: v_cndmask_b32_e32 v8, v8, v14, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v9, v9, v15, vcc_lo ; GFX10-NEXT: v_readfirstlane_b32 s1, v1 +; GFX10-NEXT: v_cndmask_b32_e64 v10, v10, v14, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v11, v11, v15, s0 +; GFX10-NEXT: v_readfirstlane_b32 s0, v0 ; GFX10-NEXT: v_readfirstlane_b32 s2, v2 ; GFX10-NEXT: v_readfirstlane_b32 s3, v3 ; GFX10-NEXT: v_readfirstlane_b32 s4, v4 @@ -6377,35 +6373,37 @@ ; GFX11-LABEL: dyn_insertelement_v7f64_v_v_v: ; GFX11: ; %bb.0: ; %entry ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v16 -; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 5, v16 +; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 1, v16 ; GFX11-NEXT: v_cmp_eq_u32_e64 s1, 6, v16 ; GFX11-NEXT: v_dual_cndmask_b32 v0, v0, v14 :: v_dual_cndmask_b32 v1, v1, v15 -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v16 -; GFX11-NEXT: v_cndmask_b32_e64 v10, v10, v14, s0 -; GFX11-NEXT: v_cndmask_b32_e64 v11, v11, v15, s0 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v16 +; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, v14, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, v15, s0 +; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 3, v16 ; GFX11-NEXT: v_cndmask_b32_e64 v12, v12, v14, s1 +; GFX11-NEXT: v_dual_cndmask_b32 v4, v4, v14 :: v_dual_cndmask_b32 v5, v5, v15 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v16 +; GFX11-NEXT: v_cndmask_b32_e64 v6, v6, v14, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v7, v7, v15, s0 +; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 5, v16 ; GFX11-NEXT: v_cndmask_b32_e64 v13, v13, v15, s1 -; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v14 :: v_dual_cndmask_b32 v3, v3, v15 -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v16 -; GFX11-NEXT: v_readfirstlane_b32 s0, v0 +; GFX11-NEXT: v_dual_cndmask_b32 v8, v8, v14 :: v_dual_cndmask_b32 v9, v9, v15 ; GFX11-NEXT: v_readfirstlane_b32 s1, v1 +; GFX11-NEXT: v_cndmask_b32_e64 v10, v10, v14, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v11, v11, v15, s0 +; GFX11-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11-NEXT: v_readfirstlane_b32 s2, v2 ; GFX11-NEXT: v_readfirstlane_b32 s3, v3 -; GFX11-NEXT: v_dual_cndmask_b32 v4, v4, v14 :: v_dual_cndmask_b32 v5, v5, v15 -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v16 -; GFX11-NEXT: v_readfirstlane_b32 s10, v10 -; GFX11-NEXT: v_readfirstlane_b32 s11, v11 ; GFX11-NEXT: v_readfirstlane_b32 s4, v4 ; GFX11-NEXT: v_readfirstlane_b32 s5, v5 -; GFX11-NEXT: v_dual_cndmask_b32 v6, v6, v14 :: v_dual_cndmask_b32 v7, v7, v15 -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 4, v16 -; GFX11-NEXT: v_readfirstlane_b32 s12, v12 -; GFX11-NEXT: v_readfirstlane_b32 s13, v13 ; GFX11-NEXT: v_readfirstlane_b32 s6, v6 ; GFX11-NEXT: v_readfirstlane_b32 s7, v7 -; GFX11-NEXT: v_dual_cndmask_b32 v8, v8, v14 :: v_dual_cndmask_b32 v9, v9, v15 ; GFX11-NEXT: v_readfirstlane_b32 s8, v8 ; GFX11-NEXT: v_readfirstlane_b32 s9, v9 +; GFX11-NEXT: v_readfirstlane_b32 s10, v10 +; GFX11-NEXT: v_readfirstlane_b32 s11, v11 +; GFX11-NEXT: v_readfirstlane_b32 s12, v12 +; GFX11-NEXT: v_readfirstlane_b32 s13, v13 ; GFX11-NEXT: ; return to shader part epilog entry: %insert = insertelement <7 x double> %vec, double %val, i32 %idx @@ -6739,20 +6737,20 @@ ; GPRIDX-LABEL: dyn_insertelement_v5f64_v_v_s: ; GPRIDX: ; %bb.0: ; %entry ; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 0 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[0:1], s2, 1 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[8:9], s2, 2 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[4:5], s2, 3 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[6:7], s2, 4 ; 
GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v10, vcc -; GPRIDX-NEXT: v_cndmask_b32_e64 v2, v2, v10, s[0:1] -; GPRIDX-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[8:9] -; GPRIDX-NEXT: v_cndmask_b32_e64 v6, v6, v10, s[4:5] -; GPRIDX-NEXT: v_cndmask_b32_e64 v8, v8, v10, s[6:7] ; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc -; GPRIDX-NEXT: v_cndmask_b32_e64 v3, v3, v11, s[0:1] -; GPRIDX-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[8:9] -; GPRIDX-NEXT: v_cndmask_b32_e64 v7, v7, v11, s[4:5] -; GPRIDX-NEXT: v_cndmask_b32_e64 v9, v9, v11, s[6:7] +; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 1 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 2 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v11, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 3 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v11, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e64 vcc, s2, 4 +; GPRIDX-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc ; GPRIDX-NEXT: v_readfirstlane_b32 s0, v0 ; GPRIDX-NEXT: v_readfirstlane_b32 s1, v1 ; GPRIDX-NEXT: v_readfirstlane_b32 s2, v2 @@ -6768,55 +6766,56 @@ ; GFX10-LABEL: dyn_insertelement_v5f64_v_v_s: ; GFX10: ; %bb.0: ; %entry ; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 0 -; GFX10-NEXT: v_cmp_eq_u32_e64 s0, s2, 4 +; GFX10-NEXT: v_cmp_eq_u32_e64 s0, s2, 1 +; GFX10-NEXT: v_cmp_eq_u32_e64 s1, s2, 4 ; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v10, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 1 -; GFX10-NEXT: v_cndmask_b32_e64 v8, v8, v10, s0 -; GFX10-NEXT: v_cndmask_b32_e64 v9, v9, v11, s0 -; GFX10-NEXT: v_readfirstlane_b32 s0, v0 -; GFX10-NEXT: v_readfirstlane_b32 s1, v1 -; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, v10, s0 ; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 2 -; GFX10-NEXT: v_readfirstlane_b32 s8, v8 -; GFX10-NEXT: v_readfirstlane_b32 s9, v9 -; GFX10-NEXT: v_readfirstlane_b32 s3, v3 +; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, v11, s0 +; GFX10-NEXT: v_cmp_eq_u32_e64 s0, s2, 3 +; GFX10-NEXT: v_cndmask_b32_e64 v8, v8, v10, s1 +; GFX10-NEXT: v_cndmask_b32_e64 v9, v9, v11, s1 ; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e32 v5, v5, v11, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 3 +; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, v10, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, v11, s0 +; GFX10-NEXT: v_readfirstlane_b32 s0, v0 +; GFX10-NEXT: v_readfirstlane_b32 s1, v1 ; GFX10-NEXT: v_readfirstlane_b32 s2, v2 +; GFX10-NEXT: v_readfirstlane_b32 s3, v3 ; GFX10-NEXT: v_readfirstlane_b32 s4, v4 ; GFX10-NEXT: v_readfirstlane_b32 s5, v5 -; GFX10-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v7, v7, v11, vcc_lo ; GFX10-NEXT: v_readfirstlane_b32 s6, v6 ; GFX10-NEXT: v_readfirstlane_b32 s7, v7 +; GFX10-NEXT: v_readfirstlane_b32 s8, v8 +; GFX10-NEXT: v_readfirstlane_b32 s9, v9 ; GFX10-NEXT: ; return to shader part epilog ; ; GFX11-LABEL: dyn_insertelement_v5f64_v_v_s: ; GFX11: ; %bb.0: ; %entry ; GFX11-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 0 -; GFX11-NEXT: v_cmp_eq_u32_e64 s0, s2, 2 +; GFX11-NEXT: v_cmp_eq_u32_e64 s0, s2, 1 ; GFX11-NEXT: v_cmp_eq_u32_e64 s1, s2, 4 ; GFX11-NEXT: v_dual_cndmask_b32 v0, v0, v10 :: v_dual_cndmask_b32 v1, v1, v11 -; GFX11-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 1 -; GFX11-NEXT: 
v_cndmask_b32_e64 v4, v4, v10, s0 -; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, v11, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, v10, s0 +; GFX11-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 2 +; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, v11, s0 +; GFX11-NEXT: v_cmp_eq_u32_e64 s0, s2, 3 ; GFX11-NEXT: v_cndmask_b32_e64 v8, v8, v10, s1 ; GFX11-NEXT: v_cndmask_b32_e64 v9, v9, v11, s1 -; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v10 :: v_dual_cndmask_b32 v3, v3, v11 -; GFX11-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 3 +; GFX11-NEXT: v_dual_cndmask_b32 v4, v4, v10 :: v_dual_cndmask_b32 v5, v5, v11 +; GFX11-NEXT: v_cndmask_b32_e64 v6, v6, v10, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v7, v7, v11, s0 ; GFX11-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11-NEXT: v_readfirstlane_b32 s1, v1 ; GFX11-NEXT: v_readfirstlane_b32 s2, v2 ; GFX11-NEXT: v_readfirstlane_b32 s3, v3 -; GFX11-NEXT: v_dual_cndmask_b32 v6, v6, v10 :: v_dual_cndmask_b32 v7, v7, v11 ; GFX11-NEXT: v_readfirstlane_b32 s4, v4 ; GFX11-NEXT: v_readfirstlane_b32 s5, v5 -; GFX11-NEXT: v_readfirstlane_b32 s8, v8 ; GFX11-NEXT: v_readfirstlane_b32 s6, v6 ; GFX11-NEXT: v_readfirstlane_b32 s7, v7 +; GFX11-NEXT: v_readfirstlane_b32 s8, v8 ; GFX11-NEXT: v_readfirstlane_b32 s9, v9 ; GFX11-NEXT: ; return to shader part epilog entry: @@ -6828,20 +6827,20 @@ ; GPRIDX-LABEL: dyn_insertelement_v5f64_v_v_v: ; GPRIDX: ; %bb.0: ; %entry ; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 0, v12 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[0:1], 1, v12 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[2:3], 2, v12 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[4:5], 3, v12 -; GPRIDX-NEXT: v_cmp_eq_u32_e64 s[6:7], 4, v12 ; GPRIDX-NEXT: v_cndmask_b32_e32 v0, v0, v10, vcc -; GPRIDX-NEXT: v_cndmask_b32_e64 v2, v2, v10, s[0:1] -; GPRIDX-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[2:3] -; GPRIDX-NEXT: v_cndmask_b32_e64 v6, v6, v10, s[4:5] -; GPRIDX-NEXT: v_cndmask_b32_e64 v8, v8, v10, s[6:7] ; GPRIDX-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc -; GPRIDX-NEXT: v_cndmask_b32_e64 v3, v3, v11, s[0:1] -; GPRIDX-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[2:3] -; GPRIDX-NEXT: v_cndmask_b32_e64 v7, v7, v11, s[4:5] -; GPRIDX-NEXT: v_cndmask_b32_e64 v9, v9, v11, s[6:7] +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 1, v12 +; GPRIDX-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 2, v12 +; GPRIDX-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v5, v5, v11, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 3, v12 +; GPRIDX-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v7, v7, v11, vcc +; GPRIDX-NEXT: v_cmp_eq_u32_e32 vcc, 4, v12 +; GPRIDX-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc +; GPRIDX-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc ; GPRIDX-NEXT: v_readfirstlane_b32 s0, v0 ; GPRIDX-NEXT: v_readfirstlane_b32 s1, v1 ; GPRIDX-NEXT: v_readfirstlane_b32 s2, v2 @@ -6857,55 +6856,56 @@ ; GFX10-LABEL: dyn_insertelement_v5f64_v_v_v: ; GFX10: ; %bb.0: ; %entry ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v12 -; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 4, v12 +; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 1, v12 +; GFX10-NEXT: v_cmp_eq_u32_e64 s1, 4, v12 ; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v10, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v12 -; GFX10-NEXT: v_cndmask_b32_e64 v8, v8, v10, s0 -; GFX10-NEXT: v_cndmask_b32_e64 v9, v9, v11, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, v10, s0 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v12 +; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, v11, s0 +; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 3, v12 +; 
GFX10-NEXT: v_cndmask_b32_e64 v8, v8, v10, s1 +; GFX10-NEXT: v_cndmask_b32_e64 v9, v9, v11, s1 +; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v5, v5, v11, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, v10, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, v11, s0 ; GFX10-NEXT: v_readfirstlane_b32 s0, v0 ; GFX10-NEXT: v_readfirstlane_b32 s1, v1 -; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v12 -; GFX10-NEXT: v_readfirstlane_b32 s8, v8 -; GFX10-NEXT: v_readfirstlane_b32 s9, v9 ; GFX10-NEXT: v_readfirstlane_b32 s2, v2 ; GFX10-NEXT: v_readfirstlane_b32 s3, v3 -; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v5, v5, v11, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v12 ; GFX10-NEXT: v_readfirstlane_b32 s4, v4 ; GFX10-NEXT: v_readfirstlane_b32 s5, v5 -; GFX10-NEXT: v_cndmask_b32_e32 v6, v6, v10, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v7, v7, v11, vcc_lo ; GFX10-NEXT: v_readfirstlane_b32 s6, v6 ; GFX10-NEXT: v_readfirstlane_b32 s7, v7 +; GFX10-NEXT: v_readfirstlane_b32 s8, v8 +; GFX10-NEXT: v_readfirstlane_b32 s9, v9 ; GFX10-NEXT: ; return to shader part epilog ; ; GFX11-LABEL: dyn_insertelement_v5f64_v_v_v: ; GFX11: ; %bb.0: ; %entry ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v12 -; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 2, v12 +; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 1, v12 ; GFX11-NEXT: v_cmp_eq_u32_e64 s1, 4, v12 ; GFX11-NEXT: v_dual_cndmask_b32 v0, v0, v10 :: v_dual_cndmask_b32 v1, v1, v11 -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v12 -; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, v10, s0 -; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, v11, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, v10, s0 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v12 +; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, v11, s0 +; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 3, v12 ; GFX11-NEXT: v_cndmask_b32_e64 v8, v8, v10, s1 ; GFX11-NEXT: v_cndmask_b32_e64 v9, v9, v11, s1 -; GFX11-NEXT: v_dual_cndmask_b32 v2, v2, v10 :: v_dual_cndmask_b32 v3, v3, v11 -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v12 +; GFX11-NEXT: v_dual_cndmask_b32 v4, v4, v10 :: v_dual_cndmask_b32 v5, v5, v11 +; GFX11-NEXT: v_cndmask_b32_e64 v6, v6, v10, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v7, v7, v11, s0 ; GFX11-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11-NEXT: v_readfirstlane_b32 s1, v1 ; GFX11-NEXT: v_readfirstlane_b32 s2, v2 ; GFX11-NEXT: v_readfirstlane_b32 s3, v3 -; GFX11-NEXT: v_dual_cndmask_b32 v6, v6, v10 :: v_dual_cndmask_b32 v7, v7, v11 ; GFX11-NEXT: v_readfirstlane_b32 s4, v4 ; GFX11-NEXT: v_readfirstlane_b32 s5, v5 -; GFX11-NEXT: v_readfirstlane_b32 s8, v8 ; GFX11-NEXT: v_readfirstlane_b32 s6, v6 ; GFX11-NEXT: v_readfirstlane_b32 s7, v7 +; GFX11-NEXT: v_readfirstlane_b32 s8, v8 ; GFX11-NEXT: v_readfirstlane_b32 s9, v9 ; GFX11-NEXT: ; return to shader part epilog entry: diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll @@ -4259,11 +4259,11 @@ ; GFX6-NEXT: s_bfe_i64 s[0:1], s[2:3], 0x300000 ; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[0:1] ; GFX6-NEXT: v_cmp_lt_i64_e64 s[0:1], s[0:1], 0 -; GFX6-NEXT: s_ashr_i32 s3, s7, 31 -; GFX6-NEXT: s_ashr_i32 s2, s7, 15 -; GFX6-NEXT: s_add_u32 s3, s3, 0xffff8000 -; GFX6-NEXT: v_mov_b32_e32 v0, s2 -; GFX6-NEXT: v_mov_b32_e32 v1, s3 +; GFX6-NEXT: s_ashr_i32 s2, s7, 31 +; GFX6-NEXT: s_ashr_i32 s3, s7, 15 +; GFX6-NEXT: 
s_add_u32 s2, s2, 0xffff8000 +; GFX6-NEXT: v_mov_b32_e32 v0, s3 +; GFX6-NEXT: v_mov_b32_e32 v1, s2 ; GFX6-NEXT: v_mov_b32_e32 v2, s4 ; GFX6-NEXT: v_mov_b32_e32 v3, s5 ; GFX6-NEXT: s_xor_b64 vcc, s[0:1], vcc @@ -4284,11 +4284,11 @@ ; GFX8-NEXT: s_bfe_i64 s[0:1], s[2:3], 0x300000 ; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[0:1] ; GFX8-NEXT: v_cmp_lt_i64_e64 s[0:1], s[0:1], 0 -; GFX8-NEXT: s_ashr_i32 s3, s7, 31 -; GFX8-NEXT: s_ashr_i32 s2, s7, 15 -; GFX8-NEXT: s_add_u32 s3, s3, 0xffff8000 -; GFX8-NEXT: v_mov_b32_e32 v0, s2 -; GFX8-NEXT: v_mov_b32_e32 v1, s3 +; GFX8-NEXT: s_ashr_i32 s2, s7, 31 +; GFX8-NEXT: s_ashr_i32 s3, s7, 15 +; GFX8-NEXT: s_add_u32 s2, s2, 0xffff8000 +; GFX8-NEXT: v_mov_b32_e32 v0, s3 +; GFX8-NEXT: v_mov_b32_e32 v1, s2 ; GFX8-NEXT: v_mov_b32_e32 v2, s4 ; GFX8-NEXT: v_mov_b32_e32 v3, s5 ; GFX8-NEXT: s_xor_b64 vcc, s[0:1], vcc @@ -4328,15 +4328,15 @@ ; GFX10-NEXT: s_lshl_b64 s[2:3], s[2:3], 16 ; GFX10-NEXT: s_add_u32 s4, s0, s2 ; GFX10-NEXT: s_addc_u32 s5, s1, s3 -; GFX10-NEXT: v_cmp_lt_i64_e64 s2, s[2:3], 0 -; GFX10-NEXT: v_cmp_lt_i64_e64 s6, s[4:5], s[0:1] ; GFX10-NEXT: v_mov_b32_e32 v0, s4 +; GFX10-NEXT: v_cmp_lt_i64_e64 s0, s[4:5], s[0:1] +; GFX10-NEXT: v_cmp_lt_i64_e64 s1, s[2:3], 0 ; GFX10-NEXT: v_mov_b32_e32 v1, s5 -; GFX10-NEXT: s_ashr_i32 s0, s5, 31 -; GFX10-NEXT: s_add_u32 s1, s0, 0x80000000 -; GFX10-NEXT: s_xor_b32 s2, s2, s6 -; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s0, s2 -; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s1, s2 +; GFX10-NEXT: s_ashr_i32 s2, s5, 31 +; GFX10-NEXT: s_add_u32 s3, s2, 0x80000000 +; GFX10-NEXT: s_xor_b32 s0, s1, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0 ; GFX10-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1] ; GFX10-NEXT: v_readfirstlane_b32 s0, v0 ; GFX10-NEXT: v_readfirstlane_b32 s1, v1 @@ -4348,14 +4348,14 @@ ; GFX11-NEXT: s_lshl_b64 s[2:3], s[2:3], 16 ; GFX11-NEXT: s_add_u32 s4, s0, s2 ; GFX11-NEXT: s_addc_u32 s5, s1, s3 -; GFX11-NEXT: v_cmp_lt_i64_e64 s2, s[2:3], 0 -; GFX11-NEXT: v_cmp_lt_i64_e64 s6, s[4:5], s[0:1] ; GFX11-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 -; GFX11-NEXT: s_ashr_i32 s0, s5, 31 -; GFX11-NEXT: s_add_u32 s1, s0, 0x80000000 -; GFX11-NEXT: s_xor_b32 s2, s2, s6 -; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s0, s2 -; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s1, s2 +; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[4:5], s[0:1] +; GFX11-NEXT: v_cmp_lt_i64_e64 s1, s[2:3], 0 +; GFX11-NEXT: s_ashr_i32 s2, s5, 31 +; GFX11-NEXT: s_add_u32 s3, s2, 0x80000000 +; GFX11-NEXT: s_xor_b32 s0, s1, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0 ; GFX11-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1] ; GFX11-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11-NEXT: v_readfirstlane_b32 s1, v1 @@ -4703,15 +4703,15 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_add_u32 s4, s0, s2 ; GFX10-NEXT: s_addc_u32 s5, s1, s3 -; GFX10-NEXT: v_cmp_lt_i64_e64 s2, s[2:3], 0 -; GFX10-NEXT: v_cmp_lt_i64_e64 s6, s[4:5], s[0:1] ; GFX10-NEXT: v_mov_b32_e32 v0, s4 +; GFX10-NEXT: v_cmp_lt_i64_e64 s0, s[4:5], s[0:1] +; GFX10-NEXT: v_cmp_lt_i64_e64 s1, s[2:3], 0 ; GFX10-NEXT: v_mov_b32_e32 v1, s5 -; GFX10-NEXT: s_ashr_i32 s0, s5, 31 -; GFX10-NEXT: s_add_u32 s1, s0, 0x80000000 -; GFX10-NEXT: s_xor_b32 s2, s2, s6 -; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s0, s2 -; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s1, s2 +; GFX10-NEXT: s_ashr_i32 s2, s5, 31 +; GFX10-NEXT: s_add_u32 s3, s2, 0x80000000 +; GFX10-NEXT: s_xor_b32 s0, s1, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v1, 
v1, s3, s0 ; GFX10-NEXT: v_readfirstlane_b32 s0, v0 ; GFX10-NEXT: v_readfirstlane_b32 s1, v1 ; GFX10-NEXT: ; return to shader part epilog @@ -4720,14 +4720,14 @@ ; GFX11: ; %bb.0: ; GFX11-NEXT: s_add_u32 s4, s0, s2 ; GFX11-NEXT: s_addc_u32 s5, s1, s3 -; GFX11-NEXT: v_cmp_lt_i64_e64 s2, s[2:3], 0 -; GFX11-NEXT: v_cmp_lt_i64_e64 s6, s[4:5], s[0:1] ; GFX11-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 -; GFX11-NEXT: s_ashr_i32 s0, s5, 31 -; GFX11-NEXT: s_add_u32 s1, s0, 0x80000000 -; GFX11-NEXT: s_xor_b32 s2, s2, s6 -; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s0, s2 -; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s1, s2 +; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[4:5], s[0:1] +; GFX11-NEXT: v_cmp_lt_i64_e64 s1, s[2:3], 0 +; GFX11-NEXT: s_ashr_i32 s2, s5, 31 +; GFX11-NEXT: s_add_u32 s3, s2, 0x80000000 +; GFX11-NEXT: s_xor_b32 s0, s1, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0 ; GFX11-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11-NEXT: v_readfirstlane_b32 s1, v1 ; GFX11-NEXT: ; return to shader part epilog @@ -5120,26 +5120,26 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_add_u32 s8, s0, s4 ; GFX10-NEXT: s_addc_u32 s9, s1, s5 -; GFX10-NEXT: v_cmp_lt_i64_e64 s4, s[4:5], 0 -; GFX10-NEXT: v_cmp_lt_i64_e64 s10, s[8:9], s[0:1] -; GFX10-NEXT: s_ashr_i32 s0, s9, 31 ; GFX10-NEXT: v_mov_b32_e32 v0, s8 -; GFX10-NEXT: s_add_u32 s1, s0, 0x80000000 +; GFX10-NEXT: v_cmp_lt_i64_e64 s0, s[8:9], s[0:1] +; GFX10-NEXT: v_cmp_lt_i64_e64 s1, s[4:5], 0 +; GFX10-NEXT: s_ashr_i32 s4, s9, 31 ; GFX10-NEXT: v_mov_b32_e32 v1, s9 -; GFX10-NEXT: s_xor_b32 s8, s4, s10 -; GFX10-NEXT: s_add_u32 s4, s2, s6 -; GFX10-NEXT: s_addc_u32 s5, s3, s7 -; GFX10-NEXT: v_mov_b32_e32 v2, s4 -; GFX10-NEXT: v_cmp_lt_i64_e64 s2, s[4:5], s[2:3] +; GFX10-NEXT: s_add_u32 s5, s4, 0x80000000 +; GFX10-NEXT: s_xor_b32 s8, s1, s0 +; GFX10-NEXT: s_add_u32 s0, s2, s6 +; GFX10-NEXT: s_addc_u32 s1, s3, s7 +; GFX10-NEXT: v_mov_b32_e32 v2, s0 +; GFX10-NEXT: v_cmp_lt_i64_e64 s2, s[0:1], s[2:3] ; GFX10-NEXT: v_cmp_lt_i64_e64 s3, s[6:7], 0 -; GFX10-NEXT: v_mov_b32_e32 v3, s5 -; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s0, s8 -; GFX10-NEXT: s_ashr_i32 s0, s5, 31 -; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s1, s8 -; GFX10-NEXT: s_add_u32 s1, s0, 0x80000000 -; GFX10-NEXT: s_xor_b32 s2, s3, s2 -; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s0, s2 -; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s1, s2 +; GFX10-NEXT: v_mov_b32_e32 v3, s1 +; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s4, s8 +; GFX10-NEXT: s_ashr_i32 s4, s1, 31 +; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s5, s8 +; GFX10-NEXT: s_add_u32 s0, s4, 0x80000000 +; GFX10-NEXT: s_xor_b32 s1, s3, s2 +; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s4, s1 +; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s0, s1 ; GFX10-NEXT: v_readfirstlane_b32 s0, v0 ; GFX10-NEXT: v_readfirstlane_b32 s1, v1 ; GFX10-NEXT: v_readfirstlane_b32 s2, v2 @@ -5150,24 +5150,24 @@ ; GFX11: ; %bb.0: ; GFX11-NEXT: s_add_u32 s8, s0, s4 ; GFX11-NEXT: s_addc_u32 s9, s1, s5 -; GFX11-NEXT: v_cmp_lt_i64_e64 s4, s[4:5], 0 -; GFX11-NEXT: v_cmp_lt_i64_e64 s10, s[8:9], s[0:1] -; GFX11-NEXT: s_ashr_i32 s0, s9, 31 ; GFX11-NEXT: v_dual_mov_b32 v0, s8 :: v_dual_mov_b32 v1, s9 -; GFX11-NEXT: s_add_u32 s1, s0, 0x80000000 -; GFX11-NEXT: s_xor_b32 s8, s4, s10 -; GFX11-NEXT: s_add_u32 s4, s2, s6 -; GFX11-NEXT: s_addc_u32 s5, s3, s7 -; GFX11-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5 -; GFX11-NEXT: v_cmp_lt_i64_e64 s2, s[4:5], s[2:3] +; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[8:9], s[0:1] +; GFX11-NEXT: v_cmp_lt_i64_e64 s1, s[4:5], 0 +; GFX11-NEXT: s_ashr_i32 s4, 
s9, 31 +; GFX11-NEXT: s_add_u32 s5, s4, 0x80000000 +; GFX11-NEXT: s_xor_b32 s8, s1, s0 +; GFX11-NEXT: s_add_u32 s0, s2, s6 +; GFX11-NEXT: s_addc_u32 s1, s3, s7 +; GFX11-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX11-NEXT: v_cmp_lt_i64_e64 s2, s[0:1], s[2:3] ; GFX11-NEXT: v_cmp_lt_i64_e64 s3, s[6:7], 0 -; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s0, s8 -; GFX11-NEXT: s_ashr_i32 s0, s5, 31 -; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s1, s8 -; GFX11-NEXT: s_add_u32 s1, s0, 0x80000000 -; GFX11-NEXT: s_xor_b32 s2, s3, s2 -; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s0, s2 -; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, s1, s2 +; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s4, s8 +; GFX11-NEXT: s_ashr_i32 s4, s1, 31 +; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s5, s8 +; GFX11-NEXT: s_add_u32 s0, s4, 0x80000000 +; GFX11-NEXT: s_xor_b32 s1, s3, s2 +; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s4, s1 +; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, s0, s1 ; GFX11-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11-NEXT: v_readfirstlane_b32 s1, v1 ; GFX11-NEXT: v_readfirstlane_b32 s2, v2 @@ -5346,14 +5346,14 @@ ; GFX10PLUS-NEXT: s_add_u32 s1, s0, 0x80000000 ; GFX10PLUS-NEXT: v_xor_b32_e32 v0, v1, v0 ; GFX10PLUS-NEXT: v_mov_b32_e32 v1, s4 -; GFX10PLUS-NEXT: s_mov_b32 s3, s0 ; GFX10PLUS-NEXT: s_mov_b32 s2, s0 +; GFX10PLUS-NEXT: s_mov_b32 s3, s0 ; GFX10PLUS-NEXT: v_and_b32_e32 v0, 1, v0 ; GFX10PLUS-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0 ; GFX10PLUS-NEXT: v_mov_b32_e32 v0, s8 ; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, s0, vcc_lo -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v2, v2, s3, vcc_lo -; GFX10PLUS-NEXT: v_cndmask_b32_e64 v0, v0, s2, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v2, v2, s2, vcc_lo +; GFX10PLUS-NEXT: v_cndmask_b32_e64 v0, v0, s3, vcc_lo ; GFX10PLUS-NEXT: v_cndmask_b32_e64 v3, v3, s1, vcc_lo ; GFX10PLUS-NEXT: v_readfirstlane_b32 s0, v1 ; GFX10PLUS-NEXT: v_readfirstlane_b32 s1, v2 @@ -6230,28 +6230,29 @@ ; GFX10-NEXT: s_cselect_b32 s1, 1, 0 ; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 ; GFX10-NEXT: s_and_b32 s1, 1, s1 +; GFX10-NEXT: s_ashr_i32 s10, s17, 31 ; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, s1 +; GFX10-NEXT: s_add_u32 s11, s10, 0x80000000 ; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo +; GFX10-NEXT: s_mov_b32 s18, s10 +; GFX10-NEXT: s_mov_b32 s19, s10 ; GFX10-NEXT: v_cndmask_b32_e64 v1, v2, 0, s0 -; GFX10-NEXT: s_ashr_i32 s0, s17, 31 -; GFX10-NEXT: v_mov_b32_e32 v2, s9 -; GFX10-NEXT: s_add_u32 s1, s0, 0x80000000 -; GFX10-NEXT: s_add_u32 s10, s4, s12 -; GFX10-NEXT: s_addc_u32 s11, s5, s13 -; GFX10-NEXT: s_addc_u32 s12, s6, s14 -; GFX10-NEXT: v_cmp_lt_u64_e64 s4, s[10:11], s[4:5] -; GFX10-NEXT: s_addc_u32 s13, s7, s15 +; GFX10-NEXT: s_add_u32 s0, s4, s12 +; GFX10-NEXT: s_addc_u32 s1, s5, s13 +; GFX10-NEXT: s_addc_u32 s2, s6, s14 +; GFX10-NEXT: v_cmp_lt_u64_e64 s4, s[0:1], s[4:5] +; GFX10-NEXT: s_addc_u32 s3, s7, s15 ; GFX10-NEXT: v_xor_b32_e32 v0, v1, v0 -; GFX10-NEXT: s_cmp_eq_u64 s[12:13], s[6:7] +; GFX10-NEXT: s_cmp_eq_u64 s[2:3], s[6:7] ; GFX10-NEXT: v_mov_b32_e32 v1, s8 ; GFX10-NEXT: s_cselect_b32 s8, 1, 0 ; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4 -; GFX10-NEXT: v_cmp_lt_i64_e64 s4, s[12:13], s[6:7] +; GFX10-NEXT: v_cmp_lt_i64_e64 s4, s[2:3], s[6:7] ; GFX10-NEXT: v_cmp_lt_i64_e64 s6, s[14:15], 0 ; GFX10-NEXT: v_and_b32_e32 v0, 1, v0 -; GFX10-NEXT: s_mov_b32 s3, s0 -; GFX10-NEXT: s_mov_b32 s2, s0 -; GFX10-NEXT: v_mov_b32_e32 v6, s11 +; GFX10-NEXT: v_mov_b32_e32 v2, s9 +; GFX10-NEXT: v_mov_b32_e32 v6, s1 +; GFX10-NEXT: v_mov_b32_e32 v7, s3 ; GFX10-NEXT: v_cndmask_b32_e64 v4, 0, 1, s4 ; GFX10-NEXT: s_and_b32 s4, 1, s8 ; 
GFX10-NEXT: s_cmp_eq_u64 s[14:15], 0 @@ -6259,34 +6260,33 @@ ; GFX10-NEXT: s_cselect_b32 s5, 1, 0 ; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s4 ; GFX10-NEXT: s_and_b32 s5, 1, s5 -; GFX10-NEXT: v_mov_b32_e32 v7, s13 ; GFX10-NEXT: v_cmp_ne_u32_e64 s4, 0, s5 ; GFX10-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc_lo ; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s16 ; GFX10-NEXT: v_cndmask_b32_e64 v4, v5, 0, s4 ; GFX10-NEXT: v_mov_b32_e32 v5, s17 -; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s0, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s3, vcc_lo +; GFX10-NEXT: s_ashr_i32 s4, s3, 31 +; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s10, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s18, vcc_lo ; GFX10-NEXT: v_xor_b32_e32 v3, v4, v3 -; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s2, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v4, v5, s1, vcc_lo -; GFX10-NEXT: v_mov_b32_e32 v5, s10 -; GFX10-NEXT: s_ashr_i32 s0, s13, 31 +; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s19, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v4, v5, s11, vcc_lo +; GFX10-NEXT: v_mov_b32_e32 v5, s0 +; GFX10-NEXT: s_add_u32 s5, s4, 0x80000000 ; GFX10-NEXT: v_and_b32_e32 v3, 1, v3 -; GFX10-NEXT: s_add_u32 s1, s0, 0x80000000 -; GFX10-NEXT: s_mov_b32 s3, s0 -; GFX10-NEXT: s_mov_b32 s2, s0 -; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v3 -; GFX10-NEXT: v_mov_b32_e32 v3, s12 -; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, s0, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, s3, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s2, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, s1, vcc_lo -; GFX10-NEXT: v_readfirstlane_b32 s0, v1 +; GFX10-NEXT: s_mov_b32 s6, s4 +; GFX10-NEXT: s_mov_b32 s0, s4 ; GFX10-NEXT: v_readfirstlane_b32 s1, v2 -; GFX10-NEXT: v_readfirstlane_b32 s2, v0 ; GFX10-NEXT: v_readfirstlane_b32 s3, v4 +; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v3 +; GFX10-NEXT: v_mov_b32_e32 v3, s2 +; GFX10-NEXT: v_readfirstlane_b32 s2, v0 +; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, s4, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, s6, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s0, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, s5, vcc_lo +; GFX10-NEXT: v_readfirstlane_b32 s0, v1 ; GFX10-NEXT: v_readfirstlane_b32 s4, v5 ; GFX10-NEXT: v_readfirstlane_b32 s5, v6 ; GFX10-NEXT: v_readfirstlane_b32 s6, v3 @@ -6312,27 +6312,27 @@ ; GFX11-NEXT: s_cselect_b32 s1, 1, 0 ; GFX11-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 ; GFX11-NEXT: s_and_b32 s1, 1, s1 +; GFX11-NEXT: s_ashr_i32 s10, s17, 31 ; GFX11-NEXT: v_cmp_ne_u32_e64 s0, 0, s1 +; GFX11-NEXT: s_add_u32 s11, s10, 0x80000000 ; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo +; GFX11-NEXT: s_mov_b32 s18, s10 +; GFX11-NEXT: s_mov_b32 s19, s10 ; GFX11-NEXT: v_cndmask_b32_e64 v1, v2, 0, s0 -; GFX11-NEXT: s_ashr_i32 s0, s17, 31 -; GFX11-NEXT: v_mov_b32_e32 v2, s9 -; GFX11-NEXT: s_add_u32 s1, s0, 0x80000000 -; GFX11-NEXT: s_add_u32 s10, s4, s12 -; GFX11-NEXT: s_addc_u32 s11, s5, s13 -; GFX11-NEXT: s_addc_u32 s12, s6, s14 -; GFX11-NEXT: v_cmp_lt_u64_e64 s4, s[10:11], s[4:5] -; GFX11-NEXT: s_addc_u32 s13, s7, s15 +; GFX11-NEXT: s_add_u32 s0, s4, s12 +; GFX11-NEXT: s_addc_u32 s1, s5, s13 +; GFX11-NEXT: s_addc_u32 s2, s6, s14 +; GFX11-NEXT: v_cmp_lt_u64_e64 s4, s[0:1], s[4:5] +; GFX11-NEXT: s_addc_u32 s3, s7, s15 ; GFX11-NEXT: v_xor_b32_e32 v0, v1, v0 -; GFX11-NEXT: s_cmp_eq_u64 s[12:13], s[6:7] ; GFX11-NEXT: v_mov_b32_e32 v1, s8 -; GFX11-NEXT: s_cselect_b32 s8, 1, 0 +; GFX11-NEXT: s_cmp_eq_u64 s[2:3], s[6:7] +; GFX11-NEXT: v_mov_b32_e32 v2, s9 ; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4 -; GFX11-NEXT: 
v_cmp_lt_i64_e64 s4, s[12:13], s[6:7] +; GFX11-NEXT: v_cmp_lt_i64_e64 s4, s[2:3], s[6:7] +; GFX11-NEXT: s_cselect_b32 s8, 1, 0 ; GFX11-NEXT: v_cmp_lt_i64_e64 s6, s[14:15], 0 -; GFX11-NEXT: s_mov_b32 s3, s0 -; GFX11-NEXT: s_mov_b32 s2, s0 -; GFX11-NEXT: v_dual_mov_b32 v7, s13 :: v_dual_mov_b32 v6, s11 +; GFX11-NEXT: v_dual_mov_b32 v7, s3 :: v_dual_mov_b32 v6, s1 ; GFX11-NEXT: v_cndmask_b32_e64 v4, 0, 1, s4 ; GFX11-NEXT: s_and_b32 s4, 1, s8 ; GFX11-NEXT: s_cmp_eq_u64 s[14:15], 0 @@ -6346,28 +6346,28 @@ ; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0 ; GFX11-NEXT: v_cndmask_b32_e64 v4, v5, 0, s4 ; GFX11-NEXT: v_mov_b32_e32 v5, s17 -; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s0, vcc_lo +; GFX11-NEXT: s_ashr_i32 s4, s3, 31 +; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s10, vcc_lo ; GFX11-NEXT: v_xor_b32_e32 v3, v4, v3 ; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s3, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v4, v5, s1, vcc_lo -; GFX11-NEXT: v_mov_b32_e32 v5, s10 +; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s18, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v4, v5, s11, vcc_lo +; GFX11-NEXT: v_mov_b32_e32 v5, s0 ; GFX11-NEXT: v_and_b32_e32 v3, 1, v3 -; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s2, vcc_lo -; GFX11-NEXT: s_ashr_i32 s0, s13, 31 -; GFX11-NEXT: s_add_u32 s1, s0, 0x80000000 +; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s19, vcc_lo +; GFX11-NEXT: s_add_u32 s5, s4, 0x80000000 +; GFX11-NEXT: s_mov_b32 s6, s4 +; GFX11-NEXT: s_mov_b32 s0, s4 ; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v3 -; GFX11-NEXT: v_mov_b32_e32 v3, s12 -; GFX11-NEXT: s_mov_b32 s3, s0 -; GFX11-NEXT: s_mov_b32 s2, s0 -; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, s0, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v6, v6, s3, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, s2, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v7, v7, s1, vcc_lo -; GFX11-NEXT: v_readfirstlane_b32 s0, v1 +; GFX11-NEXT: v_mov_b32_e32 v3, s2 ; GFX11-NEXT: v_readfirstlane_b32 s1, v2 ; GFX11-NEXT: v_readfirstlane_b32 s2, v0 ; GFX11-NEXT: v_readfirstlane_b32 s3, v4 +; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, s4, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v6, v6, s6, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, s0, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v7, v7, s5, vcc_lo +; GFX11-NEXT: v_readfirstlane_b32 s0, v1 ; GFX11-NEXT: v_readfirstlane_b32 s4, v5 ; GFX11-NEXT: v_readfirstlane_b32 s5, v6 ; GFX11-NEXT: v_readfirstlane_b32 s6, v3 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll @@ -4245,11 +4245,11 @@ ; GFX6-NEXT: s_bfe_i64 s[0:1], s[2:3], 0x300000 ; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[0:1] ; GFX6-NEXT: v_cmp_gt_i64_e64 s[0:1], s[0:1], 0 -; GFX6-NEXT: s_ashr_i32 s3, s7, 31 -; GFX6-NEXT: s_ashr_i32 s2, s7, 15 -; GFX6-NEXT: s_add_u32 s3, s3, 0xffff8000 -; GFX6-NEXT: v_mov_b32_e32 v0, s2 -; GFX6-NEXT: v_mov_b32_e32 v1, s3 +; GFX6-NEXT: s_ashr_i32 s2, s7, 31 +; GFX6-NEXT: s_ashr_i32 s3, s7, 15 +; GFX6-NEXT: s_add_u32 s2, s2, 0xffff8000 +; GFX6-NEXT: v_mov_b32_e32 v0, s3 +; GFX6-NEXT: v_mov_b32_e32 v1, s2 ; GFX6-NEXT: v_mov_b32_e32 v2, s4 ; GFX6-NEXT: v_mov_b32_e32 v3, s5 ; GFX6-NEXT: s_xor_b64 vcc, s[0:1], vcc @@ -4270,11 +4270,11 @@ ; GFX8-NEXT: s_bfe_i64 s[0:1], s[2:3], 0x300000 ; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[0:1] ; GFX8-NEXT: v_cmp_gt_i64_e64 s[0:1], s[0:1], 0 -; GFX8-NEXT: s_ashr_i32 s3, s7, 31 -; GFX8-NEXT: s_ashr_i32 s2, s7, 15 -; GFX8-NEXT: s_add_u32 s3, s3, 0xffff8000 -; 
GFX8-NEXT: v_mov_b32_e32 v0, s2 -; GFX8-NEXT: v_mov_b32_e32 v1, s3 +; GFX8-NEXT: s_ashr_i32 s2, s7, 31 +; GFX8-NEXT: s_ashr_i32 s3, s7, 15 +; GFX8-NEXT: s_add_u32 s2, s2, 0xffff8000 +; GFX8-NEXT: v_mov_b32_e32 v0, s3 +; GFX8-NEXT: v_mov_b32_e32 v1, s2 ; GFX8-NEXT: v_mov_b32_e32 v2, s4 ; GFX8-NEXT: v_mov_b32_e32 v3, s5 ; GFX8-NEXT: s_xor_b64 vcc, s[0:1], vcc @@ -4314,15 +4314,15 @@ ; GFX10-NEXT: s_lshl_b64 s[2:3], s[2:3], 16 ; GFX10-NEXT: s_sub_u32 s4, s0, s2 ; GFX10-NEXT: s_subb_u32 s5, s1, s3 -; GFX10-NEXT: v_cmp_gt_i64_e64 s2, s[2:3], 0 -; GFX10-NEXT: v_cmp_lt_i64_e64 s6, s[4:5], s[0:1] ; GFX10-NEXT: v_mov_b32_e32 v0, s4 +; GFX10-NEXT: v_cmp_lt_i64_e64 s0, s[4:5], s[0:1] +; GFX10-NEXT: v_cmp_gt_i64_e64 s1, s[2:3], 0 ; GFX10-NEXT: v_mov_b32_e32 v1, s5 -; GFX10-NEXT: s_ashr_i32 s0, s5, 31 -; GFX10-NEXT: s_add_u32 s1, s0, 0x80000000 -; GFX10-NEXT: s_xor_b32 s2, s2, s6 -; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s0, s2 -; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s1, s2 +; GFX10-NEXT: s_ashr_i32 s2, s5, 31 +; GFX10-NEXT: s_add_u32 s3, s2, 0x80000000 +; GFX10-NEXT: s_xor_b32 s0, s1, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0 ; GFX10-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1] ; GFX10-NEXT: v_readfirstlane_b32 s0, v0 ; GFX10-NEXT: v_readfirstlane_b32 s1, v1 @@ -4334,14 +4334,14 @@ ; GFX11-NEXT: s_lshl_b64 s[2:3], s[2:3], 16 ; GFX11-NEXT: s_sub_u32 s4, s0, s2 ; GFX11-NEXT: s_subb_u32 s5, s1, s3 -; GFX11-NEXT: v_cmp_gt_i64_e64 s2, s[2:3], 0 -; GFX11-NEXT: v_cmp_lt_i64_e64 s6, s[4:5], s[0:1] ; GFX11-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 -; GFX11-NEXT: s_ashr_i32 s0, s5, 31 -; GFX11-NEXT: s_add_u32 s1, s0, 0x80000000 -; GFX11-NEXT: s_xor_b32 s2, s2, s6 -; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s0, s2 -; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s1, s2 +; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[4:5], s[0:1] +; GFX11-NEXT: v_cmp_gt_i64_e64 s1, s[2:3], 0 +; GFX11-NEXT: s_ashr_i32 s2, s5, 31 +; GFX11-NEXT: s_add_u32 s3, s2, 0x80000000 +; GFX11-NEXT: s_xor_b32 s0, s1, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0 ; GFX11-NEXT: v_ashrrev_i64 v[0:1], 16, v[0:1] ; GFX11-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11-NEXT: v_readfirstlane_b32 s1, v1 @@ -4689,15 +4689,15 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_sub_u32 s4, s0, s2 ; GFX10-NEXT: s_subb_u32 s5, s1, s3 -; GFX10-NEXT: v_cmp_gt_i64_e64 s2, s[2:3], 0 -; GFX10-NEXT: v_cmp_lt_i64_e64 s6, s[4:5], s[0:1] ; GFX10-NEXT: v_mov_b32_e32 v0, s4 +; GFX10-NEXT: v_cmp_lt_i64_e64 s0, s[4:5], s[0:1] +; GFX10-NEXT: v_cmp_gt_i64_e64 s1, s[2:3], 0 ; GFX10-NEXT: v_mov_b32_e32 v1, s5 -; GFX10-NEXT: s_ashr_i32 s0, s5, 31 -; GFX10-NEXT: s_add_u32 s1, s0, 0x80000000 -; GFX10-NEXT: s_xor_b32 s2, s2, s6 -; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s0, s2 -; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s1, s2 +; GFX10-NEXT: s_ashr_i32 s2, s5, 31 +; GFX10-NEXT: s_add_u32 s3, s2, 0x80000000 +; GFX10-NEXT: s_xor_b32 s0, s1, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0 ; GFX10-NEXT: v_readfirstlane_b32 s0, v0 ; GFX10-NEXT: v_readfirstlane_b32 s1, v1 ; GFX10-NEXT: ; return to shader part epilog @@ -4706,14 +4706,14 @@ ; GFX11: ; %bb.0: ; GFX11-NEXT: s_sub_u32 s4, s0, s2 ; GFX11-NEXT: s_subb_u32 s5, s1, s3 -; GFX11-NEXT: v_cmp_gt_i64_e64 s2, s[2:3], 0 -; GFX11-NEXT: v_cmp_lt_i64_e64 s6, s[4:5], s[0:1] ; GFX11-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 -; GFX11-NEXT: s_ashr_i32 s0, s5, 31 -; GFX11-NEXT: s_add_u32 s1, s0, 0x80000000 -; 
GFX11-NEXT: s_xor_b32 s2, s2, s6 -; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s0, s2 -; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s1, s2 +; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[4:5], s[0:1] +; GFX11-NEXT: v_cmp_gt_i64_e64 s1, s[2:3], 0 +; GFX11-NEXT: s_ashr_i32 s2, s5, 31 +; GFX11-NEXT: s_add_u32 s3, s2, 0x80000000 +; GFX11-NEXT: s_xor_b32 s0, s1, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s2, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s3, s0 ; GFX11-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11-NEXT: v_readfirstlane_b32 s1, v1 ; GFX11-NEXT: ; return to shader part epilog @@ -5106,26 +5106,26 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_sub_u32 s8, s0, s4 ; GFX10-NEXT: s_subb_u32 s9, s1, s5 -; GFX10-NEXT: v_cmp_gt_i64_e64 s4, s[4:5], 0 -; GFX10-NEXT: v_cmp_lt_i64_e64 s10, s[8:9], s[0:1] -; GFX10-NEXT: s_ashr_i32 s0, s9, 31 ; GFX10-NEXT: v_mov_b32_e32 v0, s8 -; GFX10-NEXT: s_add_u32 s1, s0, 0x80000000 +; GFX10-NEXT: v_cmp_lt_i64_e64 s0, s[8:9], s[0:1] +; GFX10-NEXT: v_cmp_gt_i64_e64 s1, s[4:5], 0 +; GFX10-NEXT: s_ashr_i32 s4, s9, 31 ; GFX10-NEXT: v_mov_b32_e32 v1, s9 -; GFX10-NEXT: s_xor_b32 s8, s4, s10 -; GFX10-NEXT: s_sub_u32 s4, s2, s6 -; GFX10-NEXT: s_subb_u32 s5, s3, s7 -; GFX10-NEXT: v_mov_b32_e32 v2, s4 -; GFX10-NEXT: v_cmp_lt_i64_e64 s2, s[4:5], s[2:3] +; GFX10-NEXT: s_add_u32 s5, s4, 0x80000000 +; GFX10-NEXT: s_xor_b32 s8, s1, s0 +; GFX10-NEXT: s_sub_u32 s0, s2, s6 +; GFX10-NEXT: s_subb_u32 s1, s3, s7 +; GFX10-NEXT: v_mov_b32_e32 v2, s0 +; GFX10-NEXT: v_cmp_lt_i64_e64 s2, s[0:1], s[2:3] ; GFX10-NEXT: v_cmp_gt_i64_e64 s3, s[6:7], 0 -; GFX10-NEXT: v_mov_b32_e32 v3, s5 -; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s0, s8 -; GFX10-NEXT: s_ashr_i32 s0, s5, 31 -; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s1, s8 -; GFX10-NEXT: s_add_u32 s1, s0, 0x80000000 -; GFX10-NEXT: s_xor_b32 s2, s3, s2 -; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s0, s2 -; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s1, s2 +; GFX10-NEXT: v_mov_b32_e32 v3, s1 +; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s4, s8 +; GFX10-NEXT: s_ashr_i32 s4, s1, 31 +; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s5, s8 +; GFX10-NEXT: s_add_u32 s0, s4, 0x80000000 +; GFX10-NEXT: s_xor_b32 s1, s3, s2 +; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s4, s1 +; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s0, s1 ; GFX10-NEXT: v_readfirstlane_b32 s0, v0 ; GFX10-NEXT: v_readfirstlane_b32 s1, v1 ; GFX10-NEXT: v_readfirstlane_b32 s2, v2 @@ -5136,24 +5136,24 @@ ; GFX11: ; %bb.0: ; GFX11-NEXT: s_sub_u32 s8, s0, s4 ; GFX11-NEXT: s_subb_u32 s9, s1, s5 -; GFX11-NEXT: v_cmp_gt_i64_e64 s4, s[4:5], 0 -; GFX11-NEXT: v_cmp_lt_i64_e64 s10, s[8:9], s[0:1] -; GFX11-NEXT: s_ashr_i32 s0, s9, 31 ; GFX11-NEXT: v_dual_mov_b32 v0, s8 :: v_dual_mov_b32 v1, s9 -; GFX11-NEXT: s_add_u32 s1, s0, 0x80000000 -; GFX11-NEXT: s_xor_b32 s8, s4, s10 -; GFX11-NEXT: s_sub_u32 s4, s2, s6 -; GFX11-NEXT: s_subb_u32 s5, s3, s7 -; GFX11-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5 -; GFX11-NEXT: v_cmp_lt_i64_e64 s2, s[4:5], s[2:3] +; GFX11-NEXT: v_cmp_lt_i64_e64 s0, s[8:9], s[0:1] +; GFX11-NEXT: v_cmp_gt_i64_e64 s1, s[4:5], 0 +; GFX11-NEXT: s_ashr_i32 s4, s9, 31 +; GFX11-NEXT: s_add_u32 s5, s4, 0x80000000 +; GFX11-NEXT: s_xor_b32 s8, s1, s0 +; GFX11-NEXT: s_sub_u32 s0, s2, s6 +; GFX11-NEXT: s_subb_u32 s1, s3, s7 +; GFX11-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX11-NEXT: v_cmp_lt_i64_e64 s2, s[0:1], s[2:3] ; GFX11-NEXT: v_cmp_gt_i64_e64 s3, s[6:7], 0 -; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s0, s8 -; GFX11-NEXT: s_ashr_i32 s0, s5, 31 -; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s1, s8 -; GFX11-NEXT: s_add_u32 s1, s0, 0x80000000 
-; GFX11-NEXT: s_xor_b32 s2, s3, s2 -; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s0, s2 -; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, s1, s2 +; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s4, s8 +; GFX11-NEXT: s_ashr_i32 s4, s1, 31 +; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s5, s8 +; GFX11-NEXT: s_add_u32 s0, s4, 0x80000000 +; GFX11-NEXT: s_xor_b32 s1, s3, s2 +; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s4, s1 +; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, s0, s1 ; GFX11-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11-NEXT: v_readfirstlane_b32 s1, v1 ; GFX11-NEXT: v_readfirstlane_b32 s2, v2 @@ -5346,8 +5346,8 @@ ; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s10 ; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s0, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s3, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s2, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s2, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s3, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s1, vcc_lo ; GFX10-NEXT: v_readfirstlane_b32 s0, v1 ; GFX10-NEXT: v_readfirstlane_b32 s1, v2 @@ -5389,8 +5389,8 @@ ; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0 ; GFX11-NEXT: v_mov_b32_e32 v0, s10 ; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s0, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s3, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s2, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s2, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s3, vcc_lo ; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, s1, vcc_lo ; GFX11-NEXT: v_readfirstlane_b32 s0, v1 ; GFX11-NEXT: v_readfirstlane_b32 s1, v2 @@ -6319,22 +6319,22 @@ ; GFX10-NEXT: s_cselect_b32 s1, 1, 0 ; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 1, s2 ; GFX10-NEXT: v_cmp_gt_i64_e64 s2, s[10:11], 0 +; GFX10-NEXT: s_ashr_i32 s8, s19, 31 ; GFX10-NEXT: s_and_b32 s1, 1, s1 -; GFX10-NEXT: s_ashr_i32 s0, s19, 31 +; GFX10-NEXT: s_add_u32 s9, s8, 0x80000000 +; GFX10-NEXT: s_sub_u32 s0, s4, s12 ; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo ; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1 -; GFX10-NEXT: s_add_u32 s1, s0, 0x80000000 -; GFX10-NEXT: s_sub_u32 s8, s4, s12 -; GFX10-NEXT: s_subb_u32 s9, s5, s13 +; GFX10-NEXT: s_subb_u32 s1, s5, s13 ; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, 1, s2 -; GFX10-NEXT: v_cmp_lt_u64_e64 s4, s[8:9], s[4:5] -; GFX10-NEXT: s_subb_u32 s10, s6, s14 -; GFX10-NEXT: s_subb_u32 s11, s7, s15 -; GFX10-NEXT: s_mov_b32 s3, s0 +; GFX10-NEXT: v_cmp_lt_u64_e64 s4, s[0:1], s[4:5] +; GFX10-NEXT: s_subb_u32 s2, s6, s14 +; GFX10-NEXT: s_subb_u32 s3, s7, s15 +; GFX10-NEXT: s_mov_b32 s10, s8 ; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc_lo -; GFX10-NEXT: s_cmp_eq_u64 s[10:11], s[6:7] +; GFX10-NEXT: s_cmp_eq_u64 s[2:3], s[6:7] ; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4 -; GFX10-NEXT: v_cmp_lt_i64_e64 s4, s[10:11], s[6:7] +; GFX10-NEXT: v_cmp_lt_i64_e64 s4, s[2:3], s[6:7] ; GFX10-NEXT: v_cmp_gt_u64_e64 s6, s[12:13], 0 ; GFX10-NEXT: v_xor_b32_e32 v0, v1, v0 ; GFX10-NEXT: v_mov_b32_e32 v1, s16 @@ -6349,37 +6349,37 @@ ; GFX10-NEXT: s_cselect_b32 s5, 1, 0 ; GFX10-NEXT: v_and_b32_e32 v0, 1, v0 ; GFX10-NEXT: s_and_b32 s5, 1, s5 -; GFX10-NEXT: s_mov_b32 s2, s0 +; GFX10-NEXT: s_mov_b32 s11, s8 ; GFX10-NEXT: v_cndmask_b32_e64 v6, 0, 1, s6 ; GFX10-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc_lo ; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s5 -; GFX10-NEXT: v_mov_b32_e32 v7, s11 +; GFX10-NEXT: v_mov_b32_e32 v7, s3 +; GFX10-NEXT: s_ashr_i32 s4, s3, 31 +; GFX10-NEXT: s_add_u32 s5, s4, 0x80000000 ; GFX10-NEXT: v_cndmask_b32_e32 v4, v6, v5, vcc_lo ; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s18 ; 
GFX10-NEXT: v_mov_b32_e32 v5, s19 -; GFX10-NEXT: v_mov_b32_e32 v6, s9 +; GFX10-NEXT: v_mov_b32_e32 v6, s1 ; GFX10-NEXT: v_xor_b32_e32 v3, v4, v3 -; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s0, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s3, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s2, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v4, v5, s1, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s8, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s10, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s11, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v4, v5, s9, vcc_lo ; GFX10-NEXT: v_and_b32_e32 v3, 1, v3 -; GFX10-NEXT: v_mov_b32_e32 v5, s8 -; GFX10-NEXT: s_ashr_i32 s0, s11, 31 -; GFX10-NEXT: s_add_u32 s1, s0, 0x80000000 -; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v3 -; GFX10-NEXT: v_mov_b32_e32 v3, s10 -; GFX10-NEXT: s_mov_b32 s3, s0 -; GFX10-NEXT: s_mov_b32 s2, s0 -; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, s0, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, s3, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s2, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, s1, vcc_lo -; GFX10-NEXT: v_readfirstlane_b32 s0, v1 +; GFX10-NEXT: v_mov_b32_e32 v5, s0 +; GFX10-NEXT: s_mov_b32 s6, s4 +; GFX10-NEXT: s_mov_b32 s0, s4 ; GFX10-NEXT: v_readfirstlane_b32 s1, v2 +; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v3 +; GFX10-NEXT: v_mov_b32_e32 v3, s2 ; GFX10-NEXT: v_readfirstlane_b32 s2, v0 ; GFX10-NEXT: v_readfirstlane_b32 s3, v4 +; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, s4, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, s6, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s0, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, s5, vcc_lo +; GFX10-NEXT: v_readfirstlane_b32 s0, v1 ; GFX10-NEXT: v_readfirstlane_b32 s4, v5 ; GFX10-NEXT: v_readfirstlane_b32 s5, v6 ; GFX10-NEXT: v_readfirstlane_b32 s6, v3 @@ -6405,29 +6405,28 @@ ; GFX11-NEXT: v_cmp_gt_i64_e64 s2, s[10:11], 0 ; GFX11-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 ; GFX11-NEXT: s_cselect_b32 s1, 1, 0 -; GFX11-NEXT: s_ashr_i32 s0, s19, 31 +; GFX11-NEXT: s_ashr_i32 s8, s19, 31 ; GFX11-NEXT: s_and_b32 s1, 1, s1 -; GFX11-NEXT: s_mov_b32 s3, s0 +; GFX11-NEXT: s_add_u32 s9, s8, 0x80000000 ; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s2 ; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo ; GFX11-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1 -; GFX11-NEXT: s_add_u32 s1, s0, 0x80000000 -; GFX11-NEXT: s_sub_u32 s8, s4, s12 -; GFX11-NEXT: s_subb_u32 s9, s5, s13 -; GFX11-NEXT: s_subb_u32 s10, s6, s14 +; GFX11-NEXT: s_sub_u32 s0, s4, s12 +; GFX11-NEXT: s_subb_u32 s1, s5, s13 +; GFX11-NEXT: s_subb_u32 s2, s6, s14 +; GFX11-NEXT: v_cmp_lt_u64_e64 s4, s[0:1], s[4:5] ; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc_lo -; GFX11-NEXT: v_cmp_lt_u64_e64 s4, s[8:9], s[4:5] -; GFX11-NEXT: s_subb_u32 s11, s7, s15 -; GFX11-NEXT: s_mov_b32 s2, s0 -; GFX11-NEXT: s_cmp_eq_u64 s[10:11], s[6:7] +; GFX11-NEXT: s_subb_u32 s3, s7, s15 +; GFX11-NEXT: s_mov_b32 s10, s8 +; GFX11-NEXT: s_cmp_eq_u64 s[2:3], s[6:7] +; GFX11-NEXT: s_mov_b32 s11, s8 ; GFX11-NEXT: v_xor_b32_e32 v0, v1, v0 -; GFX11-NEXT: v_mov_b32_e32 v1, s16 ; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4 -; GFX11-NEXT: v_cmp_lt_i64_e64 s4, s[10:11], s[6:7] +; GFX11-NEXT: v_cmp_lt_i64_e64 s4, s[2:3], s[6:7] ; GFX11-NEXT: v_cmp_gt_u64_e64 s6, s[12:13], 0 -; GFX11-NEXT: v_and_b32_e32 v0, 1, v0 +; GFX11-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v0, 1, v0 ; GFX11-NEXT: s_cselect_b32 s16, 1, 0 -; GFX11-NEXT: v_mov_b32_e32 v7, s11 +; GFX11-NEXT: v_mov_b32_e32 v7, s3 ; GFX11-NEXT: v_cndmask_b32_e64 v4, 0, 1, s4 ; GFX11-NEXT: s_and_b32 s4, 1, s16 ; GFX11-NEXT: 
s_cmp_eq_u64 s[14:15], 0 @@ -6437,34 +6436,34 @@ ; GFX11-NEXT: s_cselect_b32 s5, 1, 0 ; GFX11-NEXT: v_mov_b32_e32 v2, s17 ; GFX11-NEXT: s_and_b32 s5, 1, s5 +; GFX11-NEXT: s_ashr_i32 s4, s3, 31 ; GFX11-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc_lo ; GFX11-NEXT: v_cndmask_b32_e64 v6, 0, 1, s6 ; GFX11-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s5 +; GFX11-NEXT: s_add_u32 s5, s4, 0x80000000 +; GFX11-NEXT: s_mov_b32 s6, s4 ; GFX11-NEXT: v_dual_cndmask_b32 v4, v6, v5 :: v_dual_mov_b32 v5, s19 ; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0 -; GFX11-NEXT: v_mov_b32_e32 v6, s9 +; GFX11-NEXT: v_mov_b32_e32 v6, s1 ; GFX11-NEXT: v_xor_b32_e32 v3, v4, v3 ; GFX11-NEXT: v_mov_b32_e32 v0, s18 -; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s0, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s3, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v4, v5, s1, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s8, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, s10, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v4, v5, s9, vcc_lo ; GFX11-NEXT: v_and_b32_e32 v3, 1, v3 -; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s2, vcc_lo -; GFX11-NEXT: v_mov_b32_e32 v5, s8 -; GFX11-NEXT: s_ashr_i32 s0, s11, 31 -; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v3 -; GFX11-NEXT: v_mov_b32_e32 v3, s10 -; GFX11-NEXT: s_add_u32 s1, s0, 0x80000000 -; GFX11-NEXT: s_mov_b32 s3, s0 -; GFX11-NEXT: s_mov_b32 s2, s0 -; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, s0, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v6, v6, s3, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, s2, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e64 v7, v7, s1, vcc_lo -; GFX11-NEXT: v_readfirstlane_b32 s0, v1 +; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s11, vcc_lo +; GFX11-NEXT: v_mov_b32_e32 v5, s0 +; GFX11-NEXT: s_mov_b32 s0, s4 ; GFX11-NEXT: v_readfirstlane_b32 s1, v2 +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v3 +; GFX11-NEXT: v_mov_b32_e32 v3, s2 ; GFX11-NEXT: v_readfirstlane_b32 s2, v0 ; GFX11-NEXT: v_readfirstlane_b32 s3, v4 +; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, s4, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v6, v6, s6, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, s0, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v7, v7, s5, vcc_lo +; GFX11-NEXT: v_readfirstlane_b32 s0, v1 ; GFX11-NEXT: v_readfirstlane_b32 s4, v5 ; GFX11-NEXT: v_readfirstlane_b32 s5, v6 ; GFX11-NEXT: v_readfirstlane_b32 s6, v3 diff --git a/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll b/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll --- a/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll +++ b/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll @@ -596,12 +596,12 @@ ; GFX908-NEXT: v_readfirstlane_b32 s9, v3 ; GFX908-NEXT: s_add_u32 s5, s5, 1 ; GFX908-NEXT: s_addc_u32 s9, s9, 0 -; GFX908-NEXT: s_mul_hi_u32 s19, s2, s5 -; GFX908-NEXT: s_mul_i32 s20, s3, s5 -; GFX908-NEXT: s_mul_i32 s18, s2, s5 -; GFX908-NEXT: s_mul_i32 s5, s2, s9 -; GFX908-NEXT: s_add_i32 s5, s19, s5 -; GFX908-NEXT: s_add_i32 s5, s5, s20 +; GFX908-NEXT: s_mul_hi_u32 s18, s2, s5 +; GFX908-NEXT: s_mul_i32 s9, s2, s9 +; GFX908-NEXT: s_mul_i32 s19, s3, s5 +; GFX908-NEXT: s_add_i32 s9, s18, s9 +; GFX908-NEXT: s_mul_i32 s5, s2, s5 +; GFX908-NEXT: s_add_i32 s9, s9, s19 ; GFX908-NEXT: s_branch .LBB3_5 ; GFX908-NEXT: .LBB3_4: ; %bb58 ; GFX908-NEXT: ; in Loop: Header=BB3_5 Depth=2 @@ -614,15 +614,15 @@ ; GFX908-NEXT: .LBB3_5: ; %bb16 ; GFX908-NEXT: ; Parent Loop BB3_2 Depth=1 ; GFX908-NEXT: ; => This Inner Loop Header: Depth=2 -; GFX908-NEXT: s_add_u32 s20, s16, s18 -; GFX908-NEXT: s_addc_u32 s21, s17, s5 -; GFX908-NEXT: global_load_dword v21, v19, s[20:21] offset:-12 glc +; 
GFX908-NEXT: s_add_u32 s18, s16, s5 +; GFX908-NEXT: s_addc_u32 s19, s17, s9 +; GFX908-NEXT: global_load_dword v21, v19, s[18:19] offset:-12 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: global_load_dword v20, v19, s[20:21] offset:-8 glc +; GFX908-NEXT: global_load_dword v20, v19, s[18:19] offset:-8 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: global_load_dword v12, v19, s[20:21] offset:-4 glc +; GFX908-NEXT: global_load_dword v12, v19, s[18:19] offset:-4 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: global_load_dword v12, v19, s[20:21] glc +; GFX908-NEXT: global_load_dword v12, v19, s[18:19] glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: ds_read_b64 v[12:13], v19 ; GFX908-NEXT: ds_read_b64 v[14:15], v0 @@ -725,12 +725,12 @@ ; GFX90A-NEXT: v_readfirstlane_b32 s9, v5 ; GFX90A-NEXT: s_add_u32 s5, s5, 1 ; GFX90A-NEXT: s_addc_u32 s9, s9, 0 -; GFX90A-NEXT: s_mul_hi_u32 s19, s2, s5 -; GFX90A-NEXT: s_mul_i32 s20, s3, s5 -; GFX90A-NEXT: s_mul_i32 s18, s2, s5 -; GFX90A-NEXT: s_mul_i32 s5, s2, s9 -; GFX90A-NEXT: s_add_i32 s5, s19, s5 -; GFX90A-NEXT: s_add_i32 s5, s5, s20 +; GFX90A-NEXT: s_mul_hi_u32 s18, s2, s5 +; GFX90A-NEXT: s_mul_i32 s9, s2, s9 +; GFX90A-NEXT: s_mul_i32 s19, s3, s5 +; GFX90A-NEXT: s_add_i32 s9, s18, s9 +; GFX90A-NEXT: s_mul_i32 s5, s2, s5 +; GFX90A-NEXT: s_add_i32 s9, s9, s19 ; GFX90A-NEXT: s_branch .LBB3_5 ; GFX90A-NEXT: .LBB3_4: ; %bb58 ; GFX90A-NEXT: ; in Loop: Header=BB3_5 Depth=2 @@ -743,20 +743,20 @@ ; GFX90A-NEXT: .LBB3_5: ; %bb16 ; GFX90A-NEXT: ; Parent Loop BB3_2 Depth=1 ; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2 -; GFX90A-NEXT: s_add_u32 s20, s16, s18 -; GFX90A-NEXT: s_addc_u32 s21, s17, s5 -; GFX90A-NEXT: global_load_dword v21, v19, s[20:21] offset:-12 glc +; GFX90A-NEXT: s_add_u32 s18, s16, s5 +; GFX90A-NEXT: s_addc_u32 s19, s17, s9 +; GFX90A-NEXT: global_load_dword v21, v19, s[18:19] offset:-12 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: global_load_dword v20, v19, s[20:21] offset:-8 glc +; GFX90A-NEXT: global_load_dword v20, v19, s[18:19] offset:-8 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: global_load_dword v14, v19, s[20:21] offset:-4 glc +; GFX90A-NEXT: global_load_dword v14, v19, s[18:19] offset:-4 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: global_load_dword v14, v19, s[20:21] glc +; GFX90A-NEXT: global_load_dword v14, v19, s[18:19] glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: ds_read_b64 v[14:15], v19 ; GFX90A-NEXT: ds_read_b64 v[16:17], v0 ; GFX90A-NEXT: s_and_b64 vcc, exec, s[14:15] -; GFX90A-NEXT: ; kill: killed $sgpr20 killed $sgpr21 +; GFX90A-NEXT: ; kill: killed $sgpr18 killed $sgpr19 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0) ; GFX90A-NEXT: s_cbranch_vccnz .LBB3_4 ; GFX90A-NEXT: ; %bb.6: ; %bb51 diff --git a/llvm/test/CodeGen/AMDGPU/coalesce-identity-copies-undef-subregs.mir b/llvm/test/CodeGen/AMDGPU/coalesce-identity-copies-undef-subregs.mir --- a/llvm/test/CodeGen/AMDGPU/coalesce-identity-copies-undef-subregs.mir +++ b/llvm/test/CodeGen/AMDGPU/coalesce-identity-copies-undef-subregs.mir @@ -18,7 +18,7 @@ ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: liveins: $vgpr0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef %0.sub1:vreg_64 = COPY $vgpr0 + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) @@ -29,7 +29,7 @@ ; CHECK-NEXT: bb.2: ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef %0.sub1:vreg_64 = nofpexcept V_CEIL_F32_e32 %0.sub1, 
implicit $mode, implicit $exec + ; CHECK-NEXT: [[V_CEIL_F32_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_CEIL_F32_e32 [[V_CEIL_F32_e32_]], implicit $mode, implicit $exec ; CHECK-NEXT: S_BRANCH %bb.1 bb.0: liveins: $vgpr0 @@ -59,20 +59,20 @@ ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: liveins: $vgpr0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef %0.sub1:vreg_64 = COPY $vgpr0 + ; CHECK-NEXT: undef %1.sub1:vreg_64 = COPY $vgpr0 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: S_NOP 0, implicit undef %0.sub0 - ; CHECK-NEXT: S_NOP 0, implicit undef %0.sub0 + ; CHECK-NEXT: S_NOP 0, implicit undef %1.sub0 + ; CHECK-NEXT: S_NOP 0, implicit undef %1.sub0 ; CHECK-NEXT: S_CBRANCH_EXECNZ %bb.1, implicit $exec ; CHECK-NEXT: S_BRANCH %bb.2 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.2: ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef %0.sub1:vreg_64 = nofpexcept V_CEIL_F32_e32 %0.sub1, implicit $mode, implicit $exec + ; CHECK-NEXT: undef %1.sub1:vreg_64 = nofpexcept V_CEIL_F32_e32 %1.sub1, implicit $mode, implicit $exec ; CHECK-NEXT: S_BRANCH %bb.1 bb.0: liveins: $vgpr0 @@ -102,7 +102,7 @@ ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: liveins: $vgpr0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef %0.sub1:vreg_64 = COPY $vgpr0 + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) @@ -113,7 +113,7 @@ ; CHECK-NEXT: bb.2: ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef %0.sub1:vreg_64 = nofpexcept V_MUL_F32_e32 0, %0.sub1, implicit $mode, implicit $exec + ; CHECK-NEXT: [[V_MUL_F32_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_MUL_F32_e32 0, [[V_MUL_F32_e32_]], implicit $mode, implicit $exec ; CHECK-NEXT: S_BRANCH %bb.1 bb.0: liveins: $vgpr0 @@ -143,7 +143,7 @@ ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: liveins: $vgpr0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef %0.sub1:vreg_64 = COPY $vgpr0 + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) @@ -154,7 +154,7 @@ ; CHECK-NEXT: bb.2: ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: %0.sub1:vreg_64 = nofpexcept V_MUL_F32_e32 0, %0.sub1, implicit $mode, implicit $exec + ; CHECK-NEXT: [[V_MUL_F32_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_MUL_F32_e32 0, [[V_MUL_F32_e32_]], implicit $mode, implicit $exec ; CHECK-NEXT: S_BRANCH %bb.1 bb.0: liveins: $vgpr0 @@ -185,7 +185,7 @@ ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: liveins: $vgpr0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef %0.sub1:vreg_64 = COPY $vgpr0 + ; CHECK-NEXT: undef %1.sub1:vreg_64 = COPY $vgpr0 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.2(0x40000000) @@ -195,12 +195,12 @@ ; CHECK-NEXT: bb.2: ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: S_NOP 0, implicit undef %0.sub0 - ; CHECK-NEXT: undef %0.sub1:vreg_64 = nofpexcept V_CEIL_F32_e32 %0.sub1, implicit $mode, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit undef %1.sub0 + ; CHECK-NEXT: undef %1.sub1:vreg_64 = nofpexcept V_CEIL_F32_e32 %1.sub1, implicit $mode, implicit $exec ; CHECK-NEXT: S_BRANCH %bb.1 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.3: - ; CHECK-NEXT: S_NOP 0, implicit undef %0.sub0 + ; CHECK-NEXT: S_NOP 0, implicit undef %1.sub0 bb.0: 
liveins: $vgpr0 @@ -229,7 +229,7 @@ ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: liveins: $vgpr0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef %0.sub1:vreg_64 = COPY $vgpr0 + ; CHECK-NEXT: dead undef %1.sub1:vreg_64 = COPY $vgpr0 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) @@ -237,7 +237,7 @@ ; CHECK-NEXT: S_CBRANCH_EXECNZ %bb.1, implicit $exec ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.2: - ; CHECK-NEXT: S_NOP 0, implicit undef %0.sub0 + ; CHECK-NEXT: S_NOP 0, implicit undef %1.sub0 bb.0: liveins: $vgpr0 @@ -261,7 +261,7 @@ ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: liveins: $vgpr0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef %0.sub1:vreg_64 = COPY $vgpr0 + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) @@ -269,7 +269,7 @@ ; CHECK-NEXT: S_CBRANCH_EXECNZ %bb.1, implicit $exec ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.2: - ; CHECK-NEXT: S_NOP 0, implicit %0.sub1 + ; CHECK-NEXT: S_NOP 0, implicit [[COPY]] bb.0: liveins: $vgpr0 @@ -295,7 +295,7 @@ ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: liveins: $vgpr0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef %2.sub1:vreg_64 = COPY $vgpr0 + ; CHECK-NEXT: dead [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) @@ -336,7 +336,7 @@ ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: liveins: $vgpr0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef %0.sub1:vreg_64 = COPY $vgpr0 + ; CHECK-NEXT: dead [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) @@ -376,7 +376,7 @@ ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: liveins: $vgpr0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef %0.sub1:vreg_64 = COPY $vgpr0 + ; CHECK-NEXT: dead [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) @@ -417,7 +417,7 @@ ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: liveins: $vgpr0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef %0.sub1:vreg_64 = COPY $vgpr0 + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) @@ -428,7 +428,7 @@ ; CHECK-NEXT: bb.2: ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef %0.sub1:vreg_64 = nofpexcept V_CEIL_F32_e32 %0.sub1, implicit $mode, implicit $exec + ; CHECK-NEXT: [[V_CEIL_F32_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_CEIL_F32_e32 [[V_CEIL_F32_e32_]], implicit $mode, implicit $exec ; CHECK-NEXT: S_BRANCH %bb.1 bb.0: liveins: $vgpr0 @@ -458,7 +458,7 @@ ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: liveins: $vgpr0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef %0.sub1:vreg_64 = COPY $vgpr0 + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) @@ -469,7 +469,7 @@ ; CHECK-NEXT: bb.2: ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: %0.sub1:vreg_64 = nofpexcept V_CEIL_F32_e32 %0.sub1, implicit $mode, implicit $exec + ; CHECK-NEXT: [[V_CEIL_F32_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_CEIL_F32_e32 [[V_CEIL_F32_e32_]], implicit $mode, implicit $exec ; CHECK-NEXT: S_BRANCH %bb.1 bb.0: liveins: $vgpr0 diff 
--git a/llvm/test/CodeGen/AMDGPU/dead-lane.mir b/llvm/test/CodeGen/AMDGPU/dead-lane.mir --- a/llvm/test/CodeGen/AMDGPU/dead-lane.mir +++ b/llvm/test/CodeGen/AMDGPU/dead-lane.mir @@ -1,10 +1,11 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py # RUN: llc -march=amdgcn -mcpu=tonga %s -start-before detect-dead-lanes -stop-before machine-scheduler -verify-machineinstrs -o - | FileCheck -check-prefix=GCN %s # RUN: llc -march=amdgcn -mcpu=tonga %s -start-before detect-dead-lanes -stop-before machine-scheduler -verify-machineinstrs -early-live-intervals -o - | FileCheck -check-prefix=GCN %s # GCN-LABEL: name: dead_lane # GCN: bb.0: -# GCN-NEXT: undef %3.sub0:vreg_64 = nofpexcept V_MAC_F32_e32 undef %1:vgpr_32, undef %1:vgpr_32, undef %3.sub0, implicit $mode, implicit $exec -# GCN-NEXT: FLAT_STORE_DWORD undef %4:vreg_64, %3.sub0, +# GCN-NEXT: %5:vgpr_32 = nofpexcept V_MAC_F32_e32 undef %1:vgpr_32, undef %1:vgpr_32, undef %5, implicit $mode, implicit $exec +# GCN-NEXT: FLAT_STORE_DWORD undef %4:vreg_64, %5, --- name: dead_lane tracksRegLiveness: true diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll --- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll +++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; When EXPENSIVE_CHECKS are enabled, the machine verifier appears between each ; pass. Ignore it with 'grep -v'. ; RUN: llc -O0 -mtriple=amdgcn--amdhsa -disable-verify -debug-pass=Structure < %s 2>&1 \ @@ -334,6 +335,7 @@ ; GCN-O1-NEXT: Machine Natural Loop Construction ; GCN-O1-NEXT: Simple Register Coalescing ; GCN-O1-NEXT: Rename Disconnected Subregister Components +; GCN-O1-NEXT: Rewrite Partial Register Uses ; GCN-O1-NEXT: Machine Instruction Scheduler ; GCN-O1-NEXT: MachinePostDominator Tree Construction ; GCN-O1-NEXT: SI Whole Quad Mode @@ -631,6 +633,7 @@ ; GCN-O1-OPTS-NEXT: Machine Natural Loop Construction ; GCN-O1-OPTS-NEXT: Simple Register Coalescing ; GCN-O1-OPTS-NEXT: Rename Disconnected Subregister Components +; GCN-O1-OPTS-NEXT: Rewrite Partial Register Uses ; GCN-O1-OPTS-NEXT: AMDGPU Pre-RA optimizations ; GCN-O1-OPTS-NEXT: Machine Instruction Scheduler ; GCN-O1-OPTS-NEXT: MachinePostDominator Tree Construction @@ -930,6 +933,7 @@ ; GCN-O2-NEXT: Machine Natural Loop Construction ; GCN-O2-NEXT: Simple Register Coalescing ; GCN-O2-NEXT: Rename Disconnected Subregister Components +; GCN-O2-NEXT: Rewrite Partial Register Uses ; GCN-O2-NEXT: AMDGPU Pre-RA optimizations ; GCN-O2-NEXT: Machine Instruction Scheduler ; GCN-O2-NEXT: MachinePostDominator Tree Construction @@ -1241,6 +1245,7 @@ ; GCN-O3-NEXT: Machine Natural Loop Construction ; GCN-O3-NEXT: Simple Register Coalescing ; GCN-O3-NEXT: Rename Disconnected Subregister Components +; GCN-O3-NEXT: Rewrite Partial Register Uses ; GCN-O3-NEXT: AMDGPU Pre-RA optimizations ; GCN-O3-NEXT: Machine Instruction Scheduler ; GCN-O3-NEXT: MachinePostDominator Tree Construction diff --git a/llvm/test/CodeGen/AMDGPU/llvm.mulo.ll b/llvm/test/CodeGen/AMDGPU/llvm.mulo.ll --- a/llvm/test/CodeGen/AMDGPU/llvm.mulo.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.mulo.ll @@ -184,22 +184,22 @@ ; GFX10-NEXT: v_mov_b32_e32 v5, v1 ; GFX10-NEXT: v_mad_u64_u32 v[0:1], s4, v4, v2, 0 ; GFX10-NEXT: v_mad_u64_u32 v[6:7], s4, v4, v3, 0 -; GFX10-NEXT: v_mad_u64_u32 v[9:10], s4, v5, v2, 0 -; GFX10-NEXT: v_mad_i64_i32 v[11:12], s4, v5, v3, 0 -; GFX10-NEXT: v_mov_b32_e32 v8, v1 -; GFX10-NEXT: v_add3_u32 v1, v1, v6, v9 -; 
GFX10-NEXT: v_add_co_u32 v8, vcc_lo, v8, v6 +; GFX10-NEXT: v_mad_u64_u32 v[8:9], s4, v5, v2, 0 +; GFX10-NEXT: v_mad_i64_i32 v[10:11], s4, v5, v3, 0 +; GFX10-NEXT: v_mov_b32_e32 v12, v1 +; GFX10-NEXT: v_add3_u32 v1, v1, v6, v8 +; GFX10-NEXT: v_add_co_u32 v12, vcc_lo, v12, v6 ; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, 0, v7, vcc_lo -; GFX10-NEXT: v_add_co_u32 v8, vcc_lo, v8, v9 -; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v7, v10, vcc_lo -; GFX10-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, 0, v12, vcc_lo -; GFX10-NEXT: v_add_co_u32 v7, vcc_lo, v7, v11 -; GFX10-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, 0, v8, vcc_lo +; GFX10-NEXT: v_add_co_u32 v12, vcc_lo, v12, v8 +; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v7, v9, vcc_lo +; GFX10-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, 0, v11, vcc_lo +; GFX10-NEXT: v_add_co_u32 v7, vcc_lo, v7, v10 +; GFX10-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, 0, v9, vcc_lo ; GFX10-NEXT: v_sub_co_u32 v2, vcc_lo, v7, v2 -; GFX10-NEXT: v_subrev_co_ci_u32_e32 v10, vcc_lo, 0, v8, vcc_lo +; GFX10-NEXT: v_subrev_co_ci_u32_e32 v10, vcc_lo, 0, v9, vcc_lo ; GFX10-NEXT: v_cmp_gt_i32_e32 vcc_lo, 0, v5 ; GFX10-NEXT: v_cndmask_b32_e32 v6, v7, v2, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v5, v8, v10, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc_lo ; GFX10-NEXT: v_ashrrev_i32_e32 v2, 31, v1 ; GFX10-NEXT: v_sub_co_u32 v4, vcc_lo, v6, v4 ; GFX10-NEXT: v_subrev_co_ci_u32_e32 v7, vcc_lo, 0, v5, vcc_lo @@ -219,28 +219,28 @@ ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3) ; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v4, v2, 0 ; GFX11-NEXT: v_mad_u64_u32 v[6:7], null, v4, v3, 0 -; GFX11-NEXT: v_mad_u64_u32 v[9:10], null, v5, v2, 0 -; GFX11-NEXT: v_mad_i64_i32 v[11:12], null, v5, v3, 0 +; GFX11-NEXT: v_mad_u64_u32 v[8:9], null, v5, v2, 0 +; GFX11-NEXT: v_mad_i64_i32 v[10:11], null, v5, v3, 0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-NEXT: v_mov_b32_e32 v8, v1 -; GFX11-NEXT: v_add3_u32 v1, v1, v6, v9 +; GFX11-NEXT: v_mov_b32_e32 v12, v1 +; GFX11-NEXT: v_add3_u32 v1, v1, v6, v8 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v8, v6 +; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, v12, v6 ; GFX11-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, 0, v7, vcc_lo -; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v8, v9 +; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, v12, v8 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v7, v10, vcc_lo -; GFX11-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, 0, v12, vcc_lo -; GFX11-NEXT: v_add_co_u32 v7, vcc_lo, v7, v11 +; GFX11-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v7, v9, vcc_lo +; GFX11-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, 0, v11, vcc_lo +; GFX11-NEXT: v_add_co_u32 v7, vcc_lo, v7, v10 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, 0, v8, vcc_lo +; GFX11-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, 0, v9, vcc_lo ; GFX11-NEXT: v_sub_co_u32 v2, vcc_lo, v7, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11-NEXT: v_subrev_co_ci_u32_e32 v10, vcc_lo, 0, v8, vcc_lo +; GFX11-NEXT: v_subrev_co_ci_u32_e32 v10, vcc_lo, 0, v9, vcc_lo ; GFX11-NEXT: v_cmp_gt_i32_e32 vcc_lo, 0, v5 ; GFX11-NEXT: v_cndmask_b32_e32 v6, v7, v2, vcc_lo ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11-NEXT: v_cndmask_b32_e32 
v5, v8, v10, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc_lo ; GFX11-NEXT: v_ashrrev_i32_e32 v2, 31, v1 ; GFX11-NEXT: v_sub_co_u32 v4, vcc_lo, v6, v4 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4) diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll --- a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll +++ b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll @@ -6257,22 +6257,22 @@ ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v7, 16, v2 ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v6, 16, v0 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v4, v0, 0, 16 -; GCN-NOHSA-SI-NEXT: v_ashr_i64 v[10:11], v[0:1], 48 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v12, v5, 0, 16 -; GCN-NOHSA-SI-NEXT: v_ashr_i64 v[14:15], v[2:3], 48 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v8, v1, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v0, v2, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v8, v2, 0, 16 +; GCN-NOHSA-SI-NEXT: v_ashr_i64 v[14:15], v[0:1], 48 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v12, v1, 0, 16 +; GCN-NOHSA-SI-NEXT: v_ashr_i64 v[2:3], v[2:3], 48 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v0, v5, 0, 16 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v5, 31, v4 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v6, v6, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v2, v7, 0, 16 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v9, 31, v8 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v6, v6, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v10, v7, 0, 16 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v13, 31, v12 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v7, 31, v6 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v3, 31, v2 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:16 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v11, 31, v10 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:16 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 ; GCN-NOHSA-SI-NEXT: s_endpgm ; @@ -6300,24 +6300,24 @@ ; GCN-HSA-NEXT: s_waitcnt vmcnt(0) ; GCN-HSA-NEXT: v_ashr_i64 v[6:7], v[0:1], 48 ; GCN-HSA-NEXT: v_bfe_i32 v4, v1, 0, 16 -; GCN-HSA-NEXT: v_mov_b32_e32 v11, v3 +; GCN-HSA-NEXT: v_mov_b32_e32 v18, v3 +; GCN-HSA-NEXT: v_lshrrev_b32_e32 v10, 16, v0 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v5, 31, v4 -; GCN-HSA-NEXT: v_lshrrev_b32_e32 v10, 16, v2 +; GCN-HSA-NEXT: v_lshrrev_b32_e32 v19, 16, v2 ; GCN-HSA-NEXT: flat_store_dwordx4 v[8:9], v[4:7] -; GCN-HSA-NEXT: v_lshrrev_b32_e32 v1, 16, v0 +; GCN-HSA-NEXT: v_bfe_i32 v8, v18, 0, 16 +; GCN-HSA-NEXT: v_bfe_i32 v6, v10, 0, 16 +; GCN-HSA-NEXT: v_ashr_i64 v[10:11], v[2:3], 48 ; GCN-HSA-NEXT: v_bfe_i32 v4, v0, 0, 16 -; GCN-HSA-NEXT: v_bfe_i32 v8, v2, 0, 16 -; GCN-HSA-NEXT: v_ashr_i64 v[2:3], v[2:3], 48 -; GCN-HSA-NEXT: v_bfe_i32 v0, v11, 0, 16 -; GCN-HSA-NEXT: v_bfe_i32 v6, v1, 0, 16 -; GCN-HSA-NEXT: v_bfe_i32 v10, v10, 0, 16 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v1, 31, v0 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v5, 31, v4 +; GCN-HSA-NEXT: v_bfe_i32 v0, v2, 0, 16 +; GCN-HSA-NEXT: v_bfe_i32 v2, v19, 0, 16 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v8 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v5, 31, v4 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v1, 31, v0 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v7, 31, v6 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v11, 
31, v10 -; GCN-HSA-NEXT: flat_store_dwordx4 v[14:15], v[0:3] -; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[8:11] +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v3, 31, v2 +; GCN-HSA-NEXT: flat_store_dwordx4 v[14:15], v[8:11] +; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[0:3] ; GCN-HSA-NEXT: flat_store_dwordx4 v[12:13], v[4:7] ; GCN-HSA-NEXT: s_endpgm ; @@ -6337,15 +6337,15 @@ ; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(0) ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v11, v3 ; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 -; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v6, 16, v0 ; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v7, 16, v1 ; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v10, 16, v2 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v12, v11, 0, 16 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v14, v3, 0, 16 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v4, v0, 0, 16 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v0, v1, 0, 16 -; GCN-NOHSA-VI-NEXT: v_bfe_i32 v6, v5, 0, 16 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v8, v2, 0, 16 +; GCN-NOHSA-VI-NEXT: v_bfe_i32 v6, v6, 0, 16 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v2, v7, 0, 16 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v10, v10, 0, 16 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v13, 31, v12 @@ -6857,51 +6857,51 @@ ; GCN-NOHSA-SI-NEXT: s_mov_b32 s0, s4 ; GCN-NOHSA-SI-NEXT: s_mov_b32 s1, s5 ; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, v7 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, v3 -; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v13, 16, v4 -; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v14, 16, v2 -; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v15, 16, v0 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v8, v8, 0, 16 -; GCN-NOHSA-SI-NEXT: v_ashr_i64 v[10:11], v[6:7], 48 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v9, 31, v8 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:112 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, v7 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, v3 +; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v16, 16, v4 +; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v17, 16, v2 +; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v18, 16, v0 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v8, v0, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v9, v9, 0, 16 +; GCN-NOHSA-SI-NEXT: v_ashr_i64 v[11:12], v[6:7], 48 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v10, 31, v9 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[9:12], off, s[0:3], 0 offset:112 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_ashr_i64 v[9:10], v[4:5], 48 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v7, v5, 0, 16 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v8, 31, v7 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[7:10], off, s[0:3], 0 offset:80 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v5, v0, 0, 16 +; GCN-NOHSA-SI-NEXT: v_ashr_i64 v[11:12], v[4:5], 48 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v9, v5, 0, 16 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v10, 31, v9 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[9:12], off, s[0:3], 0 offset:80 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v7, v15, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v8, v12, 0, 16 -; GCN-NOHSA-SI-NEXT: v_ashr_i64 v[10:11], v[2:3], 48 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v9, 31, v8 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:48 +; GCN-NOHSA-SI-NEXT: v_ashr_i64 v[11:12], v[2:3], 48 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v9, v13, 0, 16 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v10, 31, v9 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[9:12], off, s[0:3], 0 offset:48 +; GCN-NOHSA-SI-NEXT: v_ashr_i64 v[14:15], v[0:1], 48 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_ashr_i64 v[11:12], v[0:1], 48 -; 
GCN-NOHSA-SI-NEXT: v_bfe_i32 v9, v1, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v12, v1, 0, 16 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v0, v2, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v2, v14, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v15, v13, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v13, v4, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v10, v18, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v2, v17, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v18, v16, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v16, v4, 0, 16 ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v1, 16, v6 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v17, v6, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v19, v1, 0, 16 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v6, 31, v5 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v4, v6, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v6, v1, 0, 16 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v9, 31, v8 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v14, 31, v13 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v18, 31, v17 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v10, 31, v9 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v8, 31, v7 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v17, 31, v16 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v5, 31, v4 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v13, 31, v12 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v11, 31, v10 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v3, 31, v2 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v16, 31, v15 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v20, 31, v19 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[9:12], off, s[0:3], 0 offset:16 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[17:20], off, s[0:3], 0 offset:96 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[13:16], off, s[0:3], 0 offset:64 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v19, 31, v18 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v7, 31, v6 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:16 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:96 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:64 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[5:8], off, s[0:3], 0 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 ; GCN-NOHSA-SI-NEXT: s_endpgm ; ; GCN-HSA-LABEL: global_sextload_v16i16_to_v16i64: @@ -6966,29 +6966,29 @@ ; GCN-HSA-NEXT: v_ashr_i64 v[2:3], v[4:5], 48 ; GCN-HSA-NEXT: v_bfe_i32 v0, v5, 0, 16 ; GCN-HSA-NEXT: s_add_u32 s0, s0, 64 -; GCN-HSA-NEXT: v_mov_b32_e32 v11, v7 -; GCN-HSA-NEXT: v_lshrrev_b32_e32 v8, 16, v6 +; GCN-HSA-NEXT: v_mov_b32_e32 v8, v7 +; GCN-HSA-NEXT: v_lshrrev_b32_e32 v10, 16, v4 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v1, 31, v0 ; GCN-HSA-NEXT: s_addc_u32 s1, s1, 0 +; GCN-HSA-NEXT: v_lshrrev_b32_e32 v9, 16, v6 ; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[0:3] -; GCN-HSA-NEXT: v_bfe_i32 v10, v8, 0, 16 -; GCN-HSA-NEXT: v_lshrrev_b32_e32 v1, 16, v4 -; GCN-HSA-NEXT: v_bfe_i32 v0, v4, 0, 16 -; GCN-HSA-NEXT: v_bfe_i32 v8, v6, 0, 16 -; GCN-HSA-NEXT: v_ashr_i64 v[6:7], v[6:7], 48 -; GCN-HSA-NEXT: v_bfe_i32 v4, v11, 0, 16 +; GCN-HSA-NEXT: v_bfe_i32 v8, v8, 0, 16 +; GCN-HSA-NEXT: v_bfe_i32 v2, v10, 0, 16 +; GCN-HSA-NEXT: v_ashr_i64 v[10:11], v[6:7], 48 ; GCN-HSA-NEXT: v_mov_b32_e32 v15, s3 ; GCN-HSA-NEXT: v_mov_b32_e32 v21, s1 -; GCN-HSA-NEXT: v_bfe_i32 v2, v1, 0, 16 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v5, 31, v4 +; GCN-HSA-NEXT: v_bfe_i32 v0, v4, 0, 16 +; GCN-HSA-NEXT: v_bfe_i32 v4, v6, 0, 16 +; GCN-HSA-NEXT: v_bfe_i32 v6, v9, 0, 16 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v8 ; GCN-HSA-NEXT: v_mov_b32_e32 v14, s2 ; 
GCN-HSA-NEXT: v_mov_b32_e32 v20, s0 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v1, 31, v0 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v8 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v5, 31, v4 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v3, 31, v2 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v11, 31, v10 -; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[4:7] -; GCN-HSA-NEXT: flat_store_dwordx4 v[14:15], v[8:11] +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v7, 31, v6 +; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[8:11] +; GCN-HSA-NEXT: flat_store_dwordx4 v[14:15], v[4:7] ; GCN-HSA-NEXT: flat_store_dwordx4 v[20:21], v[0:3] ; GCN-HSA-NEXT: s_endpgm ; @@ -7009,54 +7009,54 @@ ; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(1) ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v8, v0, 0, 16 ; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(0) -; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v9, 16, v5 -; GCN-NOHSA-VI-NEXT: v_bfe_i32 v11, v9, 0, 16 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v9, v5, 0, 16 +; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v5, 16, v5 +; GCN-NOHSA-VI-NEXT: v_bfe_i32 v11, v5, 0, 16 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v10, 31, v9 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v12, 31, v11 -; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v5, 16, v4 ; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[9:12], off, s[0:3], 0 offset:80 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v13, v7 -; GCN-NOHSA-VI-NEXT: v_bfe_i32 v11, v5, 0, 16 +; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v13, 16, v0 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v9, v4, 0, 16 +; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v4, 16, v4 +; GCN-NOHSA-VI-NEXT: v_bfe_i32 v11, v4, 0, 16 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, v7 +; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v10, 31, v9 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v12, 31, v11 -; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v4, 16, v7 ; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[9:12], off, s[0:3], 0 offset:64 -; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 -; GCN-NOHSA-VI-NEXT: v_bfe_i32 v9, v13, 0, 16 -; GCN-NOHSA-VI-NEXT: v_bfe_i32 v11, v4, 0, 16 +; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GCN-NOHSA-VI-NEXT: v_bfe_i32 v9, v0, 0, 16 +; GCN-NOHSA-VI-NEXT: v_bfe_i32 v11, v7, 0, 16 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v10, 31, v9 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v12, 31, v11 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[9:12], off, s[0:3], 0 offset:112 -; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; GCN-NOHSA-VI-NEXT: v_bfe_i32 v12, v1, 0, 16 +; GCN-NOHSA-VI-NEXT: v_bfe_i32 v0, v1, 0, 16 ; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; GCN-NOHSA-VI-NEXT: v_bfe_i32 v14, v1, 0, 16 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, v3 -; GCN-NOHSA-VI-NEXT: v_bfe_i32 v16, v1, 0, 16 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[9:12], off, s[0:3], 0 offset:112 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v5, v3 +; GCN-NOHSA-VI-NEXT: v_bfe_i32 v12, v2, 0, 16 +; GCN-NOHSA-VI-NEXT: v_bfe_i32 v2, v1, 0, 16 ; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v1, 16, v6 -; GCN-NOHSA-VI-NEXT: v_bfe_i32 v10, v0, 0, 16 ; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 -; GCN-NOHSA-VI-NEXT: v_bfe_i32 v0, v2, 0, 16 -; GCN-NOHSA-VI-NEXT: v_bfe_i32 v2, v4, 0, 16 +; GCN-NOHSA-VI-NEXT: v_bfe_i32 v14, v4, 0, 16 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v4, v6, 0, 16 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v6, v1, 0, 16 +; GCN-NOHSA-VI-NEXT: v_bfe_i32 v10, v13, 0, 16 +; GCN-NOHSA-VI-NEXT: v_bfe_i32 v16, v5, 0, 16 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v18, v3, 0, 16 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v5, 31, v4 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v7, 31, v6 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v9, 31, v8 -; GCN-NOHSA-VI-NEXT: 
v_ashrrev_i32_e32 v13, 31, v12 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v13, 31, v12 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v11, 31, v10 -; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v15, 31, v14 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v3, 31, v2 +; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v15, 31, v14 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v17, 31, v16 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v19, 31, v18 ; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:96 ; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:48 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:16 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:32 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16 ; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 ; GCN-NOHSA-VI-NEXT: s_endpgm ; @@ -8042,24 +8042,24 @@ ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[13:16], off, s[0:3], 0 offset:224 ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v1, 16, v12 ; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v9, 16, v4 +; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v7, 16, v10 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v11, v12, 0, 16 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v13, v1, 0, 16 -; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v9, 16, v4 +; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v15, 16, v8 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v12, 31, v11 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v14, 31, v13 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[11:14], off, s[0:3], 0 offset:192 -; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v7, 16, v10 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v13, v3, 0, 16 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v11, v2, 0, 16 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v12, 31, v11 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v13, v3, 0, 16 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v14, 31, v13 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[11:14], off, s[0:3], 0 offset:160 -; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v1, 16, v8 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v3, v1, 0, 16 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v1, v8, 0, 16 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v5, v10, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v3, v15, 0, 16 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v7, v7, 0, 16 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v11, v9, 0, 16 @@ -8191,11 +8191,11 @@ ; GCN-HSA-NEXT: v_bfe_i32 v7, v13, 0, 16 ; GCN-HSA-NEXT: v_mov_b32_e32 v16, s12 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v8, 31, v7 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, v15 +; GCN-HSA-NEXT: v_mov_b32_e32 v5, v15 ; GCN-HSA-NEXT: v_mov_b32_e32 v19, s11 ; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[7:10] ; GCN-HSA-NEXT: v_mov_b32_e32 v18, s10 -; GCN-HSA-NEXT: v_bfe_i32 v7, v3, 0, 16 +; GCN-HSA-NEXT: v_bfe_i32 v7, v5, 0, 16 ; GCN-HSA-NEXT: v_ashr_i64 v[9:10], v[14:15], 48 ; GCN-HSA-NEXT: v_lshrrev_b32_e32 v1, 16, v2 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v8, 31, v7 @@ -8237,9 +8237,9 @@ ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2 ; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x60 ; GCN-HSA-NEXT: v_lshrrev_b32_e32 v9, 16, v14 +; GCN-HSA-NEXT: v_lshrrev_b32_e32 v10, 16, v12 ; GCN-HSA-NEXT: flat_store_dwordx4 v[7:8], v[0:3] ; GCN-HSA-NEXT: v_bfe_i32 v7, v14, 0, 16 -; GCN-HSA-NEXT: v_lshrrev_b32_e32 v2, 16, v12 ; GCN-HSA-NEXT: v_bfe_i32 v0, v12, 0, 16 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v12, 31, v11 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v14, 31, v13 
@@ -8249,10 +8249,10 @@ ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3 ; GCN-HSA-NEXT: s_add_u32 s0, s0, 64 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v8, 31, v7 +; GCN-HSA-NEXT: v_bfe_i32 v2, v10, 0, 16 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v10, 31, v9 ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2 ; GCN-HSA-NEXT: s_addc_u32 s1, s1, 0 -; GCN-HSA-NEXT: v_bfe_i32 v2, v2, 0, 16 ; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[7:10] ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s1 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v1, 31, v0 @@ -8332,19 +8332,19 @@ ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v13, v1, 0, 16 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v12, 31, v11 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v14, 31, v13 +; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v15, 16, v8 ; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[11:14], off, s[0:3], 0 offset:192 -; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v1, 16, v8 +; GCN-NOHSA-VI-NEXT: v_bfe_i32 v17, v0, 0, 16 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v11, v2, 0, 16 ; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v2, 16, v6 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v13, v3, 0, 16 +; GCN-NOHSA-VI-NEXT: v_bfe_i32 v3, v15, 0, 16 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v15, v2, 0, 16 ; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v2, 16, v0 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v12, 31, v11 ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v14, 31, v13 -; GCN-NOHSA-VI-NEXT: v_bfe_i32 v17, v0, 0, 16 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v19, v2, 0, 16 ; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[11:14], off, s[0:3], 0 offset:160 -; GCN-NOHSA-VI-NEXT: v_bfe_i32 v3, v1, 0, 16 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v1, v8, 0, 16 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v7, v7, 0, 16 ; GCN-NOHSA-VI-NEXT: v_bfe_i32 v11, v9, 0, 16 diff --git a/llvm/test/CodeGen/AMDGPU/loop-live-out-copy-undef-subrange.ll b/llvm/test/CodeGen/AMDGPU/loop-live-out-copy-undef-subrange.ll --- a/llvm/test/CodeGen/AMDGPU/loop-live-out-copy-undef-subrange.ll +++ b/llvm/test/CodeGen/AMDGPU/loop-live-out-copy-undef-subrange.ll @@ -8,9 +8,9 @@ define <3 x float> @liveout_undef_subrange(<3 x float> %arg) { ; CHECK-LABEL: liveout_undef_subrange: ; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CHECK-NEXT: v_add_f32_e32 v3, v2, v2 ; CHECK-NEXT: ; kill: killed $vgpr1 +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_add_f32_e32 v1, v2, v2 ; CHECK-NEXT: v_add_f32_e32 v0, v0, v0 ; CHECK-NEXT: .LBB0_1: ; %bb1 ; CHECK-NEXT: ; =>This Loop Header: Depth=1 @@ -26,7 +26,7 @@ ; CHECK-NEXT: ; %bb.3: ; %bb2 ; CHECK-NEXT: ; in Loop: Header=BB0_1 Depth=1 ; CHECK-NEXT: s_or_b64 exec, exec, s[4:5] -; CHECK-NEXT: v_mul_f32_e32 v2, v3, v2 +; CHECK-NEXT: v_mul_f32_e32 v2, v1, v2 ; CHECK-NEXT: s_branch .LBB0_1 bb: br label %bb1 diff --git a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll --- a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll +++ b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll @@ -159,24 +159,24 @@ ; CI: ; %bb.0: ; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; CI-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v0, v1, 0 -; CI-NEXT: v_ashrrev_i32_e32 v13, 31, v0 +; CI-NEXT: v_ashrrev_i32_e32 v12, 31, v0 ; CI-NEXT: v_mov_b32_e32 v8, 0 -; CI-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v13, v1, v[7:8] -; CI-NEXT: v_ashrrev_i32_e32 v14, 31, v1 -; CI-NEXT: v_mad_i64_i32 v[11:12], s[4:5], v1, v13, 0 -; CI-NEXT: v_mov_b32_e32 v7, v10 +; CI-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v12, v1, v[7:8] +; CI-NEXT: v_ashrrev_i32_e32 v13, 31, v1 +; CI-NEXT: v_mov_b32_e32 v11, v10 ; CI-NEXT: v_mov_b32_e32 v10, v8 -; CI-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v0, v14, v[9:10] -; CI-NEXT: v_mad_i64_i32 v[0:1], s[4:5], v14, v0, v[11:12] 
-; CI-NEXT: v_add_i32_e32 v9, vcc, v7, v9 -; CI-NEXT: v_addc_u32_e64 v10, s[4:5], 0, 0, vcc -; CI-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v13, v14, v[9:10] -; CI-NEXT: v_add_i32_e32 v7, vcc, v9, v0 -; CI-NEXT: v_addc_u32_e32 v9, vcc, v10, v1, vcc -; CI-NEXT: v_mov_b32_e32 v1, v8 +; CI-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v0, v13, v[9:10] +; CI-NEXT: v_add_i32_e32 v8, vcc, v11, v8 +; CI-NEXT: v_mad_i64_i32 v[10:11], s[4:5], v1, v12, 0 +; CI-NEXT: v_addc_u32_e64 v9, s[4:5], 0, 0, vcc +; CI-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v12, v13, v[8:9] +; CI-NEXT: v_mad_i64_i32 v[0:1], s[4:5], v13, v0, v[10:11] +; CI-NEXT: v_add_i32_e32 v8, vcc, v8, v0 +; CI-NEXT: v_addc_u32_e32 v9, vcc, v9, v1, vcc +; CI-NEXT: v_mov_b32_e32 v1, v7 ; CI-NEXT: v_add_i32_e32 v0, vcc, v6, v2 ; CI-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; CI-NEXT: v_addc_u32_e32 v2, vcc, v7, v4, vcc +; CI-NEXT: v_addc_u32_e32 v2, vcc, v8, v4, vcc ; CI-NEXT: v_addc_u32_e32 v3, vcc, v9, v5, vcc ; CI-NEXT: s_setpc_b64 s[30:31] ; @@ -217,27 +217,26 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v0, v1, 0 -; GFX9-NEXT: v_ashrrev_i32_e32 v13, 31, v0 +; GFX9-NEXT: v_ashrrev_i32_e32 v12, 31, v0 ; GFX9-NEXT: v_mov_b32_e32 v9, 0 ; GFX9-NEXT: v_mov_b32_e32 v8, v7 -; GFX9-NEXT: v_mad_u64_u32 v[10:11], s[4:5], v13, v1, v[8:9] +; GFX9-NEXT: v_mad_u64_u32 v[10:11], s[4:5], v12, v1, v[8:9] ; GFX9-NEXT: v_ashrrev_i32_e32 v14, 31, v1 -; GFX9-NEXT: v_mov_b32_e32 v8, v11 +; GFX9-NEXT: v_mov_b32_e32 v7, v11 ; GFX9-NEXT: v_mov_b32_e32 v11, v9 -; GFX9-NEXT: v_mad_u64_u32 v[10:11], s[4:5], v0, v14, v[10:11] -; GFX9-NEXT: v_mov_b32_e32 v12, v11 -; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v8, v12 -; GFX9-NEXT: v_addc_co_u32_e64 v9, s[4:5], 0, 0, vcc -; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v13, v14, v[8:9] -; GFX9-NEXT: v_mad_i64_i32 v[12:13], s[4:5], v1, v13, 0 +; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v0, v14, v[10:11] +; GFX9-NEXT: v_add_co_u32_e32 v10, vcc, v7, v9 +; GFX9-NEXT: v_addc_co_u32_e64 v11, s[4:5], 0, 0, vcc +; GFX9-NEXT: v_mad_u64_u32 v[10:11], s[4:5], v12, v14, v[10:11] +; GFX9-NEXT: v_mad_i64_i32 v[12:13], s[4:5], v1, v12, 0 ; GFX9-NEXT: v_mad_i64_i32 v[0:1], s[4:5], v14, v0, v[12:13] -; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v8, v0 -; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, v9, v1, vcc -; GFX9-NEXT: v_mov_b32_e32 v1, v10 +; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v10, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v9, vcc, v11, v1, vcc +; GFX9-NEXT: v_mov_b32_e32 v1, v8 ; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v6, v2 ; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc ; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, v7, v4, vcc -; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v8, v5, vcc +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v9, v5, vcc ; GFX9-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-LABEL: mad_i64_i32_sextops_i32_i128: @@ -250,26 +249,24 @@ ; GFX11-NEXT: v_ashrrev_i32_e32 v15, 31, v1 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_mad_u64_u32 v[9:10], null, v14, v1, v[7:8] -; GFX11-NEXT: v_dual_mov_b32 v7, v10 :: v_dual_mov_b32 v10, v8 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_mad_u64_u32 v[11:12], null, v0, v15, v[9:10] -; GFX11-NEXT: v_mad_i64_i32 v[9:10], null, v1, v14, 0 -; GFX11-NEXT: v_mov_b32_e32 v8, v12 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_mad_i64_i32 v[12:13], null, v15, v0, v[9:10] -; GFX11-NEXT: v_add_co_u32 v7, s0, v7, v8 +; 
GFX11-NEXT: v_dual_mov_b32 v11, v10 :: v_dual_mov_b32 v10, v8 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_co_ci_u32_e64 v8, null, 0, 0, s0 -; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v14, v15, v[7:8] -; GFX11-NEXT: v_mov_b32_e32 v7, v11 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11-NEXT: v_mad_u64_u32 v[7:8], null, v0, v15, v[9:10] +; GFX11-NEXT: v_mov_b32_e32 v10, v8 +; GFX11-NEXT: v_mad_i64_i32 v[8:9], null, v1, v14, 0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-NEXT: v_add_co_u32 v10, s0, v11, v10 +; GFX11-NEXT: v_add_co_ci_u32_e64 v11, null, 0, 0, s0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-NEXT: v_mad_i64_i32 v[12:13], null, v15, v0, v[8:9] +; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v14, v15, v[10:11] +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v0, v12 ; GFX11-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, v1, v13, vcc_lo ; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v6, v2 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) ; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v7, v3, vcc_lo +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) ; GFX11-NEXT: v_add_co_ci_u32_e32 v2, vcc_lo, v8, v4, vcc_lo -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) ; GFX11-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v9, v5, vcc_lo ; GFX11-NEXT: s_setpc_b64 s[30:31] %sext0 = sext i32 %arg0 to i128 diff --git a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll --- a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll +++ b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll @@ -438,105 +438,105 @@ ; GFX8-NEXT: s_swappc_b64 s[30:31], s[4:5] ; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v0 ; GFX8-NEXT: v_mov_b32_e32 v2, 0 -; GFX8-NEXT: v_lshlrev_b32_e32 v0, 17, v0 -; GFX8-NEXT: v_lshlrev_b64 v[1:2], 3, v[1:2] -; GFX8-NEXT: v_and_b32_e32 v0, 0xfe000000, v0 -; GFX8-NEXT: v_or_b32_e32 v1, v0, v1 -; GFX8-NEXT: v_mov_b32_e32 v3, s35 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, s34, v1 -; GFX8-NEXT: v_addc_u32_e32 v2, vcc, v2, v3, vcc +; GFX8-NEXT: v_lshlrev_b32_e32 v3, 17, v0 +; GFX8-NEXT: v_lshlrev_b64 v[0:1], 3, v[1:2] +; GFX8-NEXT: v_and_b32_e32 v6, 0xfe000000, v3 +; GFX8-NEXT: v_or_b32_e32 v0, v6, v0 +; GFX8-NEXT: v_mov_b32_e32 v2, s35 +; GFX8-NEXT: v_add_u32_e32 v0, vcc, s34, v0 +; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v2, vcc ; GFX8-NEXT: s_movk_i32 s0, 0x5000 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, s0, v1 +; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0 +; GFX8-NEXT: v_mov_b32_e32 v4, 0 +; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc ; GFX8-NEXT: v_mov_b32_e32 v5, 0 -; GFX8-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc -; GFX8-NEXT: v_mov_b32_e32 v6, 0 ; GFX8-NEXT: s_movk_i32 s4, 0x7f ; GFX8-NEXT: .LBB1_1: ; %for.cond.preheader ; GFX8-NEXT: ; =>This Loop Header: Depth=1 ; GFX8-NEXT: ; Child Loop BB1_2 Depth 2 -; GFX8-NEXT: v_mov_b32_e32 v4, v2 ; GFX8-NEXT: v_mov_b32_e32 v3, v1 +; GFX8-NEXT: v_mov_b32_e32 v2, v0 ; GFX8-NEXT: s_mov_b32 s5, 0 ; GFX8-NEXT: .LBB1_2: ; %for.body ; GFX8-NEXT: ; Parent Loop BB1_1 Depth=1 ; GFX8-NEXT: ; => This Inner Loop Header: Depth=2 -; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0xffffb000, v3 +; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0xffffb000, v2 ; GFX8-NEXT: s_mov_b64 s[0:1], vcc -; GFX8-NEXT: v_add_u32_e32 v9, vcc, 
0xffffb800, v3 +; GFX8-NEXT: v_add_u32_e32 v9, vcc, 0xffffb800, v2 ; GFX8-NEXT: s_mov_b64 s[2:3], vcc -; GFX8-NEXT: v_addc_u32_e64 v8, vcc, -1, v4, s[0:1] +; GFX8-NEXT: v_addc_u32_e64 v8, vcc, -1, v3, s[0:1] ; GFX8-NEXT: flat_load_dwordx2 v[7:8], v[7:8] -; GFX8-NEXT: v_add_u32_e32 v11, vcc, 0xffffc000, v3 +; GFX8-NEXT: v_add_u32_e32 v11, vcc, 0xffffc000, v2 ; GFX8-NEXT: s_mov_b64 s[0:1], vcc -; GFX8-NEXT: v_addc_u32_e64 v10, vcc, -1, v4, s[2:3] +; GFX8-NEXT: v_addc_u32_e64 v10, vcc, -1, v3, s[2:3] ; GFX8-NEXT: flat_load_dwordx2 v[9:10], v[9:10] -; GFX8-NEXT: v_add_u32_e32 v13, vcc, 0xffffc800, v3 +; GFX8-NEXT: v_add_u32_e32 v13, vcc, 0xffffc800, v2 ; GFX8-NEXT: s_mov_b64 s[2:3], vcc -; GFX8-NEXT: v_addc_u32_e64 v12, vcc, -1, v4, s[0:1] +; GFX8-NEXT: v_addc_u32_e64 v12, vcc, -1, v3, s[0:1] ; GFX8-NEXT: s_addk_i32 s5, 0x2000 ; GFX8-NEXT: s_cmp_gt_u32 s5, 0x3fffff ; GFX8-NEXT: s_waitcnt vmcnt(1) -; GFX8-NEXT: v_add_u32_e32 v15, vcc, v7, v5 -; GFX8-NEXT: v_addc_u32_e32 v6, vcc, v8, v6, vcc +; GFX8-NEXT: v_add_u32_e32 v15, vcc, v7, v4 +; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v8, v5, vcc ; GFX8-NEXT: flat_load_dwordx2 v[7:8], v[11:12] -; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0xffffd000, v3 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0xffffd000, v2 ; GFX8-NEXT: s_mov_b64 s[0:1], vcc -; GFX8-NEXT: v_addc_u32_e64 v14, vcc, -1, v4, s[2:3] +; GFX8-NEXT: v_addc_u32_e64 v14, vcc, -1, v3, s[2:3] ; GFX8-NEXT: flat_load_dwordx2 v[11:12], v[13:14] ; GFX8-NEXT: s_waitcnt vmcnt(2) ; GFX8-NEXT: v_add_u32_e32 v15, vcc, v9, v15 -; GFX8-NEXT: v_addc_u32_e32 v10, vcc, v10, v6, vcc -; GFX8-NEXT: v_add_u32_e32 v9, vcc, 0xffffd800, v3 +; GFX8-NEXT: v_addc_u32_e32 v10, vcc, v10, v5, vcc +; GFX8-NEXT: v_add_u32_e32 v9, vcc, 0xffffd800, v2 ; GFX8-NEXT: s_mov_b64 s[2:3], vcc -; GFX8-NEXT: v_addc_u32_e64 v6, vcc, -1, v4, s[0:1] -; GFX8-NEXT: flat_load_dwordx2 v[5:6], v[5:6] +; GFX8-NEXT: v_addc_u32_e64 v5, vcc, -1, v3, s[0:1] +; GFX8-NEXT: flat_load_dwordx2 v[4:5], v[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(2) ; GFX8-NEXT: v_add_u32_e32 v13, vcc, v7, v15 ; GFX8-NEXT: v_addc_u32_e32 v8, vcc, v8, v10, vcc -; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0xffffe000, v3 +; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0xffffe000, v2 ; GFX8-NEXT: s_mov_b64 s[0:1], vcc -; GFX8-NEXT: v_addc_u32_e64 v10, vcc, -1, v4, s[2:3] +; GFX8-NEXT: v_addc_u32_e64 v10, vcc, -1, v3, s[2:3] ; GFX8-NEXT: flat_load_dwordx2 v[9:10], v[9:10] ; GFX8-NEXT: s_waitcnt vmcnt(2) ; GFX8-NEXT: v_add_u32_e32 v13, vcc, v11, v13 ; GFX8-NEXT: v_addc_u32_e32 v12, vcc, v12, v8, vcc -; GFX8-NEXT: v_add_u32_e32 v11, vcc, 0xffffe800, v3 +; GFX8-NEXT: v_add_u32_e32 v11, vcc, 0xffffe800, v2 ; GFX8-NEXT: s_mov_b64 s[2:3], vcc -; GFX8-NEXT: v_addc_u32_e64 v8, vcc, -1, v4, s[0:1] +; GFX8-NEXT: v_addc_u32_e64 v8, vcc, -1, v3, s[0:1] ; GFX8-NEXT: flat_load_dwordx2 v[7:8], v[7:8] ; GFX8-NEXT: s_waitcnt vmcnt(2) -; GFX8-NEXT: v_add_u32_e32 v13, vcc, v5, v13 -; GFX8-NEXT: v_addc_u32_e32 v6, vcc, v6, v12, vcc -; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0xfffff000, v3 +; GFX8-NEXT: v_add_u32_e32 v13, vcc, v4, v13 +; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v5, v12, vcc +; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0xfffff000, v2 ; GFX8-NEXT: s_mov_b64 s[0:1], vcc -; GFX8-NEXT: v_addc_u32_e64 v12, vcc, -1, v4, s[2:3] +; GFX8-NEXT: v_addc_u32_e64 v12, vcc, -1, v3, s[2:3] ; GFX8-NEXT: flat_load_dwordx2 v[11:12], v[11:12] ; GFX8-NEXT: s_waitcnt vmcnt(2) ; GFX8-NEXT: v_add_u32_e32 v13, vcc, v9, v13 -; GFX8-NEXT: v_addc_u32_e32 v14, vcc, v10, v6, vcc -; GFX8-NEXT: v_addc_u32_e64 v6, s[0:1], -1, v4, s[0:1] -; GFX8-NEXT: v_add_u32_e32 v9, vcc, 
0xfffff800, v3 -; GFX8-NEXT: flat_load_dwordx2 v[5:6], v[5:6] -; GFX8-NEXT: v_addc_u32_e32 v10, vcc, -1, v4, vcc +; GFX8-NEXT: v_addc_u32_e32 v14, vcc, v10, v5, vcc +; GFX8-NEXT: v_addc_u32_e64 v5, s[0:1], -1, v3, s[0:1] +; GFX8-NEXT: v_add_u32_e32 v9, vcc, 0xfffff800, v2 +; GFX8-NEXT: flat_load_dwordx2 v[4:5], v[4:5] +; GFX8-NEXT: v_addc_u32_e32 v10, vcc, -1, v3, vcc ; GFX8-NEXT: flat_load_dwordx2 v[9:10], v[9:10] ; GFX8-NEXT: s_waitcnt vmcnt(3) ; GFX8-NEXT: v_add_u32_e32 v13, vcc, v7, v13 ; GFX8-NEXT: v_addc_u32_e32 v14, vcc, v8, v14, vcc -; GFX8-NEXT: flat_load_dwordx2 v[7:8], v[3:4] -; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x10000, v3 -; GFX8-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc +; GFX8-NEXT: flat_load_dwordx2 v[7:8], v[2:3] +; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x10000, v2 +; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc ; GFX8-NEXT: s_waitcnt vmcnt(3) ; GFX8-NEXT: v_add_u32_e32 v11, vcc, v11, v13 ; GFX8-NEXT: v_addc_u32_e32 v12, vcc, v12, v14, vcc ; GFX8-NEXT: s_waitcnt vmcnt(2) -; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v11 -; GFX8-NEXT: v_addc_u32_e32 v6, vcc, v6, v12, vcc +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v11 +; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v5, v12, vcc ; GFX8-NEXT: s_waitcnt vmcnt(1) -; GFX8-NEXT: v_add_u32_e32 v5, vcc, v9, v5 -; GFX8-NEXT: v_addc_u32_e32 v6, vcc, v10, v6, vcc +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v9, v4 +; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v10, v5, vcc ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v5, vcc, v7, v5 -; GFX8-NEXT: v_addc_u32_e32 v6, vcc, v8, v6, vcc +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v7, v4 +; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v8, v5, vcc ; GFX8-NEXT: s_cbranch_scc0 .LBB1_2 ; GFX8-NEXT: ; %bb.3: ; %while.cond.loopexit ; GFX8-NEXT: ; in Loop: Header=BB1_1 Depth=1 @@ -548,9 +548,9 @@ ; GFX8-NEXT: s_branch .LBB1_1 ; GFX8-NEXT: .LBB1_5: ; %while.end ; GFX8-NEXT: v_mov_b32_e32 v1, s35 -; GFX8-NEXT: v_add_u32_e32 v0, vcc, s34, v0 +; GFX8-NEXT: v_add_u32_e32 v0, vcc, s34, v6 ; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc -; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[5:6] +; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[4:5] ; GFX8-NEXT: s_endpgm ; ; GFX900-LABEL: clmem_read: @@ -575,89 +575,89 @@ ; GFX900-NEXT: s_swappc_b64 s[30:31], s[4:5] ; GFX900-NEXT: v_and_b32_e32 v1, 0xff, v0 ; GFX900-NEXT: v_mov_b32_e32 v2, 0 -; GFX900-NEXT: v_lshlrev_b32_e32 v0, 17, v0 -; GFX900-NEXT: v_lshlrev_b64 v[1:2], 3, v[1:2] -; GFX900-NEXT: v_and_b32_e32 v0, 0xfe000000, v0 -; GFX900-NEXT: v_or_b32_e32 v1, v0, v1 -; GFX900-NEXT: v_mov_b32_e32 v3, s35 -; GFX900-NEXT: v_add_co_u32_e32 v1, vcc, s34, v1 -; GFX900-NEXT: v_addc_co_u32_e32 v2, vcc, v2, v3, vcc +; GFX900-NEXT: v_lshlrev_b32_e32 v3, 17, v0 +; GFX900-NEXT: v_lshlrev_b64 v[0:1], 3, v[1:2] +; GFX900-NEXT: v_and_b32_e32 v6, 0xfe000000, v3 +; GFX900-NEXT: v_or_b32_e32 v0, v6, v0 +; GFX900-NEXT: v_mov_b32_e32 v2, s35 +; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, s34, v0 +; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v2, vcc ; GFX900-NEXT: s_movk_i32 s0, 0x5000 -; GFX900-NEXT: v_add_co_u32_e32 v1, vcc, s0, v1 -; GFX900-NEXT: v_mov_b32_e32 v5, 0 -; GFX900-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v2, vcc +; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0 +; GFX900-NEXT: v_mov_b32_e32 v4, 0 +; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; GFX900-NEXT: s_movk_i32 s4, 0x7f -; GFX900-NEXT: v_mov_b32_e32 v6, 0 +; GFX900-NEXT: v_mov_b32_e32 v5, 0 ; GFX900-NEXT: s_movk_i32 s2, 0xd000 ; GFX900-NEXT: s_movk_i32 s3, 0xe000 ; GFX900-NEXT: s_movk_i32 s5, 0xf000 ; GFX900-NEXT: .LBB1_1: ; %for.cond.preheader ; GFX900-NEXT: ; 
=>This Loop Header: Depth=1 ; GFX900-NEXT: ; Child Loop BB1_2 Depth 2 -; GFX900-NEXT: v_mov_b32_e32 v4, v2 ; GFX900-NEXT: v_mov_b32_e32 v3, v1 +; GFX900-NEXT: v_mov_b32_e32 v2, v0 ; GFX900-NEXT: s_mov_b32 s6, 0 ; GFX900-NEXT: .LBB1_2: ; %for.body ; GFX900-NEXT: ; Parent Loop BB1_1 Depth=1 ; GFX900-NEXT: ; => This Inner Loop Header: Depth=2 -; GFX900-NEXT: v_add_co_u32_e32 v7, vcc, 0xffffb000, v3 +; GFX900-NEXT: v_add_co_u32_e32 v7, vcc, 0xffffb000, v2 ; GFX900-NEXT: s_mov_b64 s[0:1], vcc -; GFX900-NEXT: v_addc_co_u32_e64 v8, s[0:1], -1, v4, s[0:1] -; GFX900-NEXT: global_load_dwordx2 v[9:10], v[3:4], off offset:-4096 -; GFX900-NEXT: global_load_dwordx2 v[11:12], v[3:4], off offset:-2048 -; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, 0xffffc000, v3 +; GFX900-NEXT: v_addc_co_u32_e64 v8, s[0:1], -1, v3, s[0:1] +; GFX900-NEXT: global_load_dwordx2 v[9:10], v[2:3], off offset:-4096 +; GFX900-NEXT: global_load_dwordx2 v[11:12], v[2:3], off offset:-2048 +; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, 0xffffc000, v2 ; GFX900-NEXT: global_load_dwordx2 v[7:8], v[7:8], off -; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, -1, v4, vcc +; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, -1, v3, vcc ; GFX900-NEXT: s_addk_i32 s6, 0x2000 ; GFX900-NEXT: s_cmp_gt_u32 s6, 0x3fffff ; GFX900-NEXT: s_waitcnt vmcnt(0) -; GFX900-NEXT: v_add_co_u32_e32 v7, vcc, v7, v5 -; GFX900-NEXT: v_addc_co_u32_e32 v8, vcc, v8, v6, vcc -; GFX900-NEXT: global_load_dwordx2 v[5:6], v[13:14], off offset:-2048 +; GFX900-NEXT: v_add_co_u32_e32 v7, vcc, v7, v4 +; GFX900-NEXT: v_addc_co_u32_e32 v8, vcc, v8, v5, vcc +; GFX900-NEXT: global_load_dwordx2 v[4:5], v[13:14], off offset:-2048 ; GFX900-NEXT: s_waitcnt vmcnt(0) -; GFX900-NEXT: v_add_co_u32_e32 v15, vcc, v5, v7 -; GFX900-NEXT: v_addc_co_u32_e32 v8, vcc, v6, v8, vcc -; GFX900-NEXT: global_load_dwordx2 v[6:7], v[13:14], off -; GFX900-NEXT: v_add_co_u32_e32 v5, vcc, s2, v3 +; GFX900-NEXT: v_add_co_u32_e32 v15, vcc, v4, v7 +; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v8, vcc +; GFX900-NEXT: global_load_dwordx2 v[7:8], v[13:14], off +; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, s2, v2 ; GFX900-NEXT: s_mov_b64 s[0:1], vcc ; GFX900-NEXT: s_waitcnt vmcnt(0) -; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, v6, v15 -; GFX900-NEXT: v_addc_co_u32_e64 v6, s[0:1], -1, v4, s[0:1] -; GFX900-NEXT: global_load_dwordx2 v[5:6], v[5:6], off offset:-2048 -; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, v7, v8, vcc -; GFX900-NEXT: v_add_co_u32_e32 v7, vcc, s3, v3 -; GFX900-NEXT: v_addc_co_u32_e32 v8, vcc, -1, v4, vcc +; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, v7, v15 +; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, v8, v5, vcc +; GFX900-NEXT: v_addc_co_u32_e64 v5, s[0:1], -1, v3, s[0:1] +; GFX900-NEXT: global_load_dwordx2 v[4:5], v[4:5], off offset:-2048 +; GFX900-NEXT: v_add_co_u32_e32 v7, vcc, s3, v2 +; GFX900-NEXT: v_addc_co_u32_e32 v8, vcc, -1, v3, vcc ; GFX900-NEXT: s_waitcnt vmcnt(0) -; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, v5, v13 -; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, v6, v14, vcc -; GFX900-NEXT: global_load_dwordx2 v[5:6], v[7:8], off offset:-4096 +; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, v4, v13 +; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, v5, v14, vcc +; GFX900-NEXT: global_load_dwordx2 v[4:5], v[7:8], off offset:-4096 ; GFX900-NEXT: s_waitcnt vmcnt(0) -; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, v5, v13 -; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, v6, v14, vcc -; GFX900-NEXT: global_load_dwordx2 v[5:6], v[7:8], off offset:-2048 +; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, v4, v13 +; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, v5, 
v14, vcc +; GFX900-NEXT: global_load_dwordx2 v[4:5], v[7:8], off offset:-2048 ; GFX900-NEXT: s_waitcnt vmcnt(0) -; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, v5, v13 +; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, v4, v13 ; GFX900-NEXT: global_load_dwordx2 v[7:8], v[7:8], off -; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, v6, v14, vcc -; GFX900-NEXT: v_add_co_u32_e32 v5, vcc, s5, v3 -; GFX900-NEXT: v_addc_co_u32_e32 v6, vcc, -1, v4, vcc -; GFX900-NEXT: global_load_dwordx2 v[5:6], v[5:6], off offset:-2048 +; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, v5, v14, vcc +; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, s5, v2 +; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, -1, v3, vcc +; GFX900-NEXT: global_load_dwordx2 v[4:5], v[4:5], off offset:-2048 ; GFX900-NEXT: s_waitcnt vmcnt(1) ; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, v7, v13 ; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, v8, v14, vcc -; GFX900-NEXT: global_load_dwordx2 v[7:8], v[3:4], off -; GFX900-NEXT: v_add_co_u32_e32 v3, vcc, 0x10000, v3 -; GFX900-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v4, vcc +; GFX900-NEXT: global_load_dwordx2 v[7:8], v[2:3], off +; GFX900-NEXT: v_add_co_u32_e32 v2, vcc, 0x10000, v2 +; GFX900-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc ; GFX900-NEXT: s_waitcnt vmcnt(1) -; GFX900-NEXT: v_add_co_u32_e32 v5, vcc, v5, v13 -; GFX900-NEXT: v_addc_co_u32_e32 v6, vcc, v6, v14, vcc -; GFX900-NEXT: v_add_co_u32_e32 v5, vcc, v9, v5 -; GFX900-NEXT: v_addc_co_u32_e32 v6, vcc, v10, v6, vcc -; GFX900-NEXT: v_add_co_u32_e32 v5, vcc, v11, v5 -; GFX900-NEXT: v_addc_co_u32_e32 v6, vcc, v12, v6, vcc +; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v4, v13 +; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v14, vcc +; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v9, v4 +; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v10, v5, vcc +; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v11, v4 +; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v12, v5, vcc ; GFX900-NEXT: s_waitcnt vmcnt(0) -; GFX900-NEXT: v_add_co_u32_e32 v5, vcc, v7, v5 -; GFX900-NEXT: v_addc_co_u32_e32 v6, vcc, v8, v6, vcc +; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v7, v4 +; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v8, v5, vcc ; GFX900-NEXT: s_cbranch_scc0 .LBB1_2 ; GFX900-NEXT: ; %bb.3: ; %while.cond.loopexit ; GFX900-NEXT: ; in Loop: Header=BB1_1 Depth=1 @@ -669,9 +669,9 @@ ; GFX900-NEXT: s_branch .LBB1_1 ; GFX900-NEXT: .LBB1_5: ; %while.end ; GFX900-NEXT: v_mov_b32_e32 v1, s35 -; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, s34, v0 +; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, s34, v6 ; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc -; GFX900-NEXT: global_store_dwordx2 v[0:1], v[5:6], off +; GFX900-NEXT: global_store_dwordx2 v[0:1], v[4:5], off ; GFX900-NEXT: s_endpgm ; ; GFX10-LABEL: clmem_read: @@ -696,40 +696,40 @@ ; GFX10-NEXT: s_swappc_b64 s[30:31], s[4:5] ; GFX10-NEXT: v_mov_b32_e32 v2, 0 ; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v0 -; GFX10-NEXT: v_lshlrev_b32_e32 v0, 17, v0 -; GFX10-NEXT: v_mov_b32_e32 v3, 0 -; GFX10-NEXT: v_mov_b32_e32 v4, 0 +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 17, v0 ; GFX10-NEXT: s_movk_i32 s1, 0x7f -; GFX10-NEXT: v_lshlrev_b64 v[1:2], 3, v[1:2] -; GFX10-NEXT: v_and_b32_e32 v0, 0xfe000000, v0 -; GFX10-NEXT: v_or_b32_e32 v1, v0, v1 -; GFX10-NEXT: v_add_co_u32 v1, vcc_lo, v1, s34 -; GFX10-NEXT: v_add_co_ci_u32_e32 v2, vcc_lo, s35, v2, vcc_lo -; GFX10-NEXT: v_add_co_u32 v1, vcc_lo, 0x5000, v1 -; GFX10-NEXT: v_add_co_ci_u32_e32 v2, vcc_lo, 0, v2, vcc_lo +; GFX10-NEXT: v_lshlrev_b64 v[0:1], 3, v[1:2] +; GFX10-NEXT: v_and_b32_e32 v6, 0xfe000000, v3 +; GFX10-NEXT: v_mov_b32_e32 v2, 0 +; GFX10-NEXT: v_mov_b32_e32 v3, 0 +; GFX10-NEXT: 
v_or_b32_e32 v0, v6, v0 +; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, s34 +; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, s35, v1, vcc_lo +; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, 0x5000, v0 +; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo ; GFX10-NEXT: .LBB1_1: ; %for.cond.preheader ; GFX10-NEXT: ; =>This Loop Header: Depth=1 ; GFX10-NEXT: ; Child Loop BB1_2 Depth 2 -; GFX10-NEXT: v_mov_b32_e32 v6, v2 ; GFX10-NEXT: v_mov_b32_e32 v5, v1 +; GFX10-NEXT: v_mov_b32_e32 v4, v0 ; GFX10-NEXT: s_mov_b32 s2, 0 ; GFX10-NEXT: .LBB1_2: ; %for.body ; GFX10-NEXT: ; Parent Loop BB1_1 Depth=1 ; GFX10-NEXT: ; => This Inner Loop Header: Depth=2 -; GFX10-NEXT: v_add_co_u32 v7, vcc_lo, v5, 0xffffb800 -; GFX10-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, -1, v6, vcc_lo -; GFX10-NEXT: v_add_co_u32 v9, vcc_lo, v5, 0xffffc800 -; GFX10-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, -1, v6, vcc_lo -; GFX10-NEXT: v_add_co_u32 v13, vcc_lo, v5, 0xffffd800 -; GFX10-NEXT: v_add_co_ci_u32_e32 v14, vcc_lo, -1, v6, vcc_lo -; GFX10-NEXT: v_add_co_u32 v17, vcc_lo, v5, 0xffffe800 +; GFX10-NEXT: v_add_co_u32 v7, vcc_lo, v4, 0xffffb800 +; GFX10-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, -1, v5, vcc_lo +; GFX10-NEXT: v_add_co_u32 v9, vcc_lo, v4, 0xffffc800 +; GFX10-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, -1, v5, vcc_lo +; GFX10-NEXT: v_add_co_u32 v13, vcc_lo, v4, 0xffffd800 +; GFX10-NEXT: v_add_co_ci_u32_e32 v14, vcc_lo, -1, v5, vcc_lo +; GFX10-NEXT: v_add_co_u32 v17, vcc_lo, v4, 0xffffe800 ; GFX10-NEXT: s_clause 0x2 ; GFX10-NEXT: global_load_dwordx2 v[11:12], v[7:8], off offset:-2048 ; GFX10-NEXT: global_load_dwordx2 v[15:16], v[9:10], off offset:-2048 ; GFX10-NEXT: global_load_dwordx2 v[19:20], v[13:14], off offset:-2048 -; GFX10-NEXT: v_add_co_ci_u32_e32 v18, vcc_lo, -1, v6, vcc_lo -; GFX10-NEXT: v_add_co_u32 v21, vcc_lo, 0xfffff000, v5 -; GFX10-NEXT: v_add_co_ci_u32_e32 v22, vcc_lo, -1, v6, vcc_lo +; GFX10-NEXT: v_add_co_ci_u32_e32 v18, vcc_lo, -1, v5, vcc_lo +; GFX10-NEXT: v_add_co_u32 v21, vcc_lo, 0xfffff000, v4 +; GFX10-NEXT: v_add_co_ci_u32_e32 v22, vcc_lo, -1, v5, vcc_lo ; GFX10-NEXT: s_clause 0x7 ; GFX10-NEXT: global_load_dwordx2 v[23:24], v[17:18], off offset:-2048 ; GFX10-NEXT: global_load_dwordx2 v[7:8], v[7:8], off @@ -737,42 +737,42 @@ ; GFX10-NEXT: global_load_dwordx2 v[13:14], v[13:14], off ; GFX10-NEXT: global_load_dwordx2 v[25:26], v[17:18], off ; GFX10-NEXT: global_load_dwordx2 v[27:28], v[21:22], off -; GFX10-NEXT: global_load_dwordx2 v[29:30], v[5:6], off offset:-2048 -; GFX10-NEXT: global_load_dwordx2 v[31:32], v[5:6], off -; GFX10-NEXT: v_add_co_u32 v5, vcc_lo, 0x10000, v5 -; GFX10-NEXT: v_add_co_ci_u32_e32 v6, vcc_lo, 0, v6, vcc_lo +; GFX10-NEXT: global_load_dwordx2 v[29:30], v[4:5], off offset:-2048 +; GFX10-NEXT: global_load_dwordx2 v[31:32], v[4:5], off +; GFX10-NEXT: v_add_co_u32 v4, vcc_lo, 0x10000, v4 +; GFX10-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, 0, v5, vcc_lo ; GFX10-NEXT: s_addk_i32 s2, 0x2000 ; GFX10-NEXT: s_cmp_gt_u32 s2, 0x3fffff ; GFX10-NEXT: s_waitcnt vmcnt(10) -; GFX10-NEXT: v_add_co_u32 v3, s0, v11, v3 -; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s0, v12, v4, s0 +; GFX10-NEXT: v_add_co_u32 v2, s0, v11, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v12, v3, s0 ; GFX10-NEXT: s_waitcnt vmcnt(6) -; GFX10-NEXT: v_add_co_u32 v3, s0, v7, v3 -; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s0, v8, v4, s0 -; GFX10-NEXT: v_add_co_u32 v3, s0, v15, v3 -; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s0, v16, v4, s0 +; GFX10-NEXT: v_add_co_u32 v2, s0, v7, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v8, v3, s0 +; GFX10-NEXT: v_add_co_u32 
v2, s0, v15, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v16, v3, s0 ; GFX10-NEXT: s_waitcnt vmcnt(5) -; GFX10-NEXT: v_add_co_u32 v3, s0, v9, v3 -; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s0, v10, v4, s0 -; GFX10-NEXT: v_add_co_u32 v3, s0, v19, v3 -; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s0, v20, v4, s0 +; GFX10-NEXT: v_add_co_u32 v2, s0, v9, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v10, v3, s0 +; GFX10-NEXT: v_add_co_u32 v2, s0, v19, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v20, v3, s0 ; GFX10-NEXT: s_waitcnt vmcnt(4) -; GFX10-NEXT: v_add_co_u32 v3, s0, v13, v3 -; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s0, v14, v4, s0 -; GFX10-NEXT: v_add_co_u32 v3, s0, v23, v3 -; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s0, v24, v4, s0 +; GFX10-NEXT: v_add_co_u32 v2, s0, v13, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v14, v3, s0 +; GFX10-NEXT: v_add_co_u32 v2, s0, v23, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v24, v3, s0 ; GFX10-NEXT: s_waitcnt vmcnt(3) -; GFX10-NEXT: v_add_co_u32 v3, s0, v25, v3 -; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s0, v26, v4, s0 +; GFX10-NEXT: v_add_co_u32 v2, s0, v25, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v26, v3, s0 ; GFX10-NEXT: s_waitcnt vmcnt(2) -; GFX10-NEXT: v_add_co_u32 v3, s0, v27, v3 -; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s0, v28, v4, s0 +; GFX10-NEXT: v_add_co_u32 v2, s0, v27, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v28, v3, s0 ; GFX10-NEXT: s_waitcnt vmcnt(1) -; GFX10-NEXT: v_add_co_u32 v3, s0, v29, v3 -; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s0, v30, v4, s0 +; GFX10-NEXT: v_add_co_u32 v2, s0, v29, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v30, v3, s0 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_add_co_u32 v3, vcc_lo, v31, v3 -; GFX10-NEXT: v_add_co_ci_u32_e32 v4, vcc_lo, v32, v4, vcc_lo +; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, v31, v2 +; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v32, v3, vcc_lo ; GFX10-NEXT: s_cbranch_scc0 .LBB1_2 ; GFX10-NEXT: ; %bb.3: ; %while.cond.loopexit ; GFX10-NEXT: ; in Loop: Header=BB1_1 Depth=1 @@ -783,9 +783,9 @@ ; GFX10-NEXT: s_mov_b32 s1, s0 ; GFX10-NEXT: s_branch .LBB1_1 ; GFX10-NEXT: .LBB1_5: ; %while.end -; GFX10-NEXT: v_add_co_u32 v0, s0, s34, v0 +; GFX10-NEXT: v_add_co_u32 v0, s0, s34, v6 ; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s0, s35, 0, s0 -; GFX10-NEXT: global_store_dwordx2 v[0:1], v[3:4], off +; GFX10-NEXT: global_store_dwordx2 v[0:1], v[2:3], off ; GFX10-NEXT: s_endpgm ; ; GFX90A-LABEL: clmem_read: @@ -811,83 +811,83 @@ ; GFX90A-NEXT: v_and_b32_e32 v2, 0xff, v0 ; GFX90A-NEXT: v_mov_b32_e32 v3, 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v0, 17, v0 -; GFX90A-NEXT: v_and_b32_e32 v0, 0xfe000000, v0 -; GFX90A-NEXT: v_lshlrev_b64 v[2:3], 3, v[2:3] -; GFX90A-NEXT: v_or_b32_e32 v1, v0, v2 +; GFX90A-NEXT: v_and_b32_e32 v6, 0xfe000000, v0 +; GFX90A-NEXT: v_lshlrev_b64 v[0:1], 3, v[2:3] +; GFX90A-NEXT: v_or_b32_e32 v0, v6, v0 ; GFX90A-NEXT: v_mov_b32_e32 v2, s35 -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, s34, v1 -; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v2, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v0, vcc, s34, v0 +; GFX90A-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v2, vcc ; GFX90A-NEXT: s_movk_i32 s0, 0x5000 -; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, s0, v1 -; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0 +; GFX90A-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; GFX90A-NEXT: s_movk_i32 s2, 0x7f -; GFX90A-NEXT: v_pk_mov_b32 v[4:5], 0, 0 +; GFX90A-NEXT: v_pk_mov_b32 v[2:3], 0, 0 ; GFX90A-NEXT: s_movk_i32 s0, 0xd000 ; GFX90A-NEXT: s_movk_i32 s1, 0xe000 ; GFX90A-NEXT: s_movk_i32 
s3, 0xf000 ; GFX90A-NEXT: .LBB1_1: ; %for.cond.preheader ; GFX90A-NEXT: ; =>This Loop Header: Depth=1 ; GFX90A-NEXT: ; Child Loop BB1_2 Depth 2 -; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[2:3], v[2:3] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1] ; GFX90A-NEXT: s_mov_b32 s4, 0 ; GFX90A-NEXT: .LBB1_2: ; %for.body ; GFX90A-NEXT: ; Parent Loop BB1_1 Depth=1 ; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2 -; GFX90A-NEXT: v_add_co_u32_e32 v8, vcc, 0xffffb000, v6 -; GFX90A-NEXT: v_addc_co_u32_e32 v9, vcc, -1, v7, vcc -; GFX90A-NEXT: global_load_dwordx2 v[10:11], v[6:7], off offset:-4096 -; GFX90A-NEXT: global_load_dwordx2 v[12:13], v[6:7], off offset:-2048 -; GFX90A-NEXT: v_add_co_u32_e32 v14, vcc, 0xffffc000, v6 +; GFX90A-NEXT: v_add_co_u32_e32 v8, vcc, 0xffffb000, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v9, vcc, -1, v5, vcc +; GFX90A-NEXT: global_load_dwordx2 v[10:11], v[4:5], off offset:-4096 +; GFX90A-NEXT: global_load_dwordx2 v[12:13], v[4:5], off offset:-2048 +; GFX90A-NEXT: v_add_co_u32_e32 v14, vcc, 0xffffc000, v4 ; GFX90A-NEXT: global_load_dwordx2 v[8:9], v[8:9], off -; GFX90A-NEXT: v_addc_co_u32_e32 v15, vcc, -1, v7, vcc +; GFX90A-NEXT: v_addc_co_u32_e32 v15, vcc, -1, v5, vcc ; GFX90A-NEXT: global_load_dwordx2 v[18:19], v[14:15], off offset:-2048 -; GFX90A-NEXT: v_add_co_u32_e32 v16, vcc, s0, v6 -; GFX90A-NEXT: v_addc_co_u32_e32 v17, vcc, -1, v7, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v16, vcc, s0, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v17, vcc, -1, v5, vcc ; GFX90A-NEXT: global_load_dwordx2 v[16:17], v[16:17], off offset:-2048 -; GFX90A-NEXT: v_add_co_u32_e32 v20, vcc, s1, v6 +; GFX90A-NEXT: v_add_co_u32_e32 v20, vcc, s1, v4 ; GFX90A-NEXT: global_load_dwordx2 v[14:15], v[14:15], off -; GFX90A-NEXT: v_addc_co_u32_e32 v21, vcc, -1, v7, vcc +; GFX90A-NEXT: v_addc_co_u32_e32 v21, vcc, -1, v5, vcc ; GFX90A-NEXT: global_load_dwordx2 v[24:25], v[20:21], off offset:-4096 ; GFX90A-NEXT: global_load_dwordx2 v[26:27], v[20:21], off offset:-2048 ; GFX90A-NEXT: global_load_dwordx2 v[28:29], v[20:21], off -; GFX90A-NEXT: v_add_co_u32_e32 v22, vcc, s3, v6 -; GFX90A-NEXT: v_addc_co_u32_e32 v23, vcc, -1, v7, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v22, vcc, s3, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v23, vcc, -1, v5, vcc ; GFX90A-NEXT: global_load_dwordx2 v[20:21], v[22:23], off offset:-2048 -; GFX90A-NEXT: global_load_dwordx2 v[30:31], v[6:7], off -; GFX90A-NEXT: v_add_co_u32_e32 v6, vcc, 0x10000, v6 -; GFX90A-NEXT: v_addc_co_u32_e32 v7, vcc, 0, v7, vcc +; GFX90A-NEXT: global_load_dwordx2 v[30:31], v[4:5], off +; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, 0x10000, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v5, vcc ; GFX90A-NEXT: s_addk_i32 s4, 0x2000 ; GFX90A-NEXT: s_cmp_gt_u32 s4, 0x3fffff ; GFX90A-NEXT: s_waitcnt vmcnt(8) -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v8, v4 -; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v9, v5, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, v8, v2 +; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, v9, v3, vcc ; GFX90A-NEXT: s_waitcnt vmcnt(7) -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v18, v1 -; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v19, v4, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, v18, v2 +; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, v19, v3, vcc ; GFX90A-NEXT: s_waitcnt vmcnt(5) -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v14, v1 -; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v15, v4, vcc -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v16, v1 -; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v17, v4, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, v14, v2 +; GFX90A-NEXT: 
v_addc_co_u32_e32 v3, vcc, v15, v3, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, v16, v2 +; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, v17, v3, vcc ; GFX90A-NEXT: s_waitcnt vmcnt(4) -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v24, v1 -; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v25, v4, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, v24, v2 +; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, v25, v3, vcc ; GFX90A-NEXT: s_waitcnt vmcnt(3) -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v26, v1 -; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v27, v4, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, v26, v2 +; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, v27, v3, vcc ; GFX90A-NEXT: s_waitcnt vmcnt(2) -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v28, v1 -; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v29, v4, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, v28, v2 +; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, v29, v3, vcc ; GFX90A-NEXT: s_waitcnt vmcnt(1) -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v20, v1 -; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v21, v4, vcc -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v10, v1 -; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v11, v4, vcc -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v12, v1 -; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v13, v4, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, v20, v2 +; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, v21, v3, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, v10, v2 +; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, v11, v3, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, v12, v2 +; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, v13, v3, vcc ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v30, v1 -; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v31, v5, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, v30, v2 +; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, v31, v3, vcc ; GFX90A-NEXT: s_cbranch_scc0 .LBB1_2 ; GFX90A-NEXT: ; %bb.3: ; %while.cond.loopexit ; GFX90A-NEXT: ; in Loop: Header=BB1_1 Depth=1 @@ -899,9 +899,9 @@ ; GFX90A-NEXT: s_branch .LBB1_1 ; GFX90A-NEXT: .LBB1_5: ; %while.end ; GFX90A-NEXT: v_mov_b32_e32 v1, s35 -; GFX90A-NEXT: v_add_co_u32_e32 v0, vcc, s34, v0 +; GFX90A-NEXT: v_add_co_u32_e32 v0, vcc, s34, v6 ; GFX90A-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc -; GFX90A-NEXT: global_store_dwordx2 v[0:1], v[4:5], off +; GFX90A-NEXT: global_store_dwordx2 v[0:1], v[2:3], off ; GFX90A-NEXT: s_endpgm ; ; GFX11-LABEL: clmem_read: @@ -916,102 +916,102 @@ ; GFX11-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-NEXT: s_swappc_b64 s[30:31], s[2:3] ; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_and_b32 v1, 0xff, v0 -; GFX11-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_lshlrev_b32 v0, 17, v0 -; GFX11-NEXT: v_mov_b32_e32 v4, 0 +; GFX11-NEXT: v_lshlrev_b32_e32 v3, 17, v0 ; GFX11-NEXT: s_movk_i32 s1, 0x7f -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-NEXT: v_lshlrev_b64 v[1:2], 3, v[1:2] -; GFX11-NEXT: v_and_b32_e32 v0, 0xfe000000, v0 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_or_b32_e32 v1, v0, v1 -; GFX11-NEXT: v_add_co_u32 v1, vcc_lo, v1, s34 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e32 v2, vcc_lo, s35, v2, vcc_lo -; GFX11-NEXT: v_add_co_u32 v1, vcc_lo, 0x5000, v1 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e32 v2, vcc_lo, 0, v2, vcc_lo +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-NEXT: v_lshlrev_b64 v[0:1], 3, v[1:2] +; GFX11-NEXT: v_and_b32_e32 
v6, 0xfe000000, v3 +; GFX11-NEXT: v_mov_b32_e32 v2, 0 +; GFX11-NEXT: v_mov_b32_e32 v3, 0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-NEXT: v_or_b32_e32 v0, v6, v0 +; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, s34 +; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, s35, v1, vcc_lo +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, 0x5000, v0 +; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo ; GFX11-NEXT: .LBB1_1: ; %for.cond.preheader ; GFX11-NEXT: ; =>This Loop Header: Depth=1 ; GFX11-NEXT: ; Child Loop BB1_2 Depth 2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v6, v2 :: v_dual_mov_b32 v5, v1 +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_mov_b32 s2, 0 ; GFX11-NEXT: .LBB1_2: ; %for.body ; GFX11-NEXT: ; Parent Loop BB1_1 Depth=1 ; GFX11-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_u32 v7, vcc_lo, v5, 0xffffc000 -; GFX11-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, -1, v6, vcc_lo -; GFX11-NEXT: v_add_co_u32 v9, vcc_lo, 0xffffc000, v5 -; GFX11-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, -1, v6, vcc_lo -; GFX11-NEXT: v_add_co_u32 v11, vcc_lo, 0xffffd000, v5 +; GFX11-NEXT: v_add_co_u32 v7, vcc_lo, v4, 0xffffc000 +; GFX11-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, -1, v5, vcc_lo +; GFX11-NEXT: v_add_co_u32 v9, vcc_lo, 0xffffc000, v4 +; GFX11-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, -1, v5, vcc_lo +; GFX11-NEXT: v_add_co_u32 v11, vcc_lo, 0xffffd000, v4 ; GFX11-NEXT: s_clause 0x1 ; GFX11-NEXT: global_load_b64 v[13:14], v[7:8], off offset:-4096 ; GFX11-NEXT: global_load_b64 v[9:10], v[9:10], off offset:-2048 -; GFX11-NEXT: v_add_co_ci_u32_e32 v12, vcc_lo, -1, v6, vcc_lo -; GFX11-NEXT: v_add_co_u32 v15, vcc_lo, v5, 0xffffe000 -; GFX11-NEXT: v_add_co_ci_u32_e32 v16, vcc_lo, -1, v6, vcc_lo +; GFX11-NEXT: v_add_co_ci_u32_e32 v12, vcc_lo, -1, v5, vcc_lo +; GFX11-NEXT: v_add_co_u32 v15, vcc_lo, v4, 0xffffe000 +; GFX11-NEXT: v_add_co_ci_u32_e32 v16, vcc_lo, -1, v5, vcc_lo ; GFX11-NEXT: global_load_b64 v[11:12], v[11:12], off offset:-2048 -; GFX11-NEXT: v_add_co_u32 v17, vcc_lo, 0xffffe000, v5 +; GFX11-NEXT: v_add_co_u32 v17, vcc_lo, 0xffffe000, v4 ; GFX11-NEXT: s_clause 0x1 ; GFX11-NEXT: global_load_b64 v[19:20], v[15:16], off offset:-4096 ; GFX11-NEXT: global_load_b64 v[7:8], v[7:8], off -; GFX11-NEXT: v_add_co_ci_u32_e32 v18, vcc_lo, -1, v6, vcc_lo -; GFX11-NEXT: v_add_co_u32 v21, vcc_lo, 0xfffff000, v5 -; GFX11-NEXT: v_add_co_ci_u32_e32 v22, vcc_lo, -1, v6, vcc_lo +; GFX11-NEXT: v_add_co_ci_u32_e32 v18, vcc_lo, -1, v5, vcc_lo +; GFX11-NEXT: v_add_co_u32 v21, vcc_lo, 0xfffff000, v4 +; GFX11-NEXT: v_add_co_ci_u32_e32 v22, vcc_lo, -1, v5, vcc_lo ; GFX11-NEXT: s_clause 0x5 ; GFX11-NEXT: global_load_b64 v[17:18], v[17:18], off offset:-2048 ; GFX11-NEXT: global_load_b64 v[15:16], v[15:16], off ; GFX11-NEXT: global_load_b64 v[21:22], v[21:22], off offset:-2048 -; GFX11-NEXT: global_load_b64 v[23:24], v[5:6], off offset:-4096 -; GFX11-NEXT: global_load_b64 v[25:26], v[5:6], off offset:-2048 -; GFX11-NEXT: global_load_b64 v[27:28], v[5:6], off -; GFX11-NEXT: v_add_co_u32 v5, vcc_lo, 0x10000, v5 -; GFX11-NEXT: v_add_co_ci_u32_e32 v6, vcc_lo, 0, v6, vcc_lo +; GFX11-NEXT: global_load_b64 v[23:24], v[4:5], off offset:-4096 +; GFX11-NEXT: global_load_b64 v[25:26], v[4:5], off offset:-2048 +; GFX11-NEXT: global_load_b64 v[27:28], v[4:5], off 
+; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x10000, v4 +; GFX11-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, 0, v5, vcc_lo ; GFX11-NEXT: s_addk_i32 s2, 0x2000 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) ; GFX11-NEXT: s_cmp_gt_u32 s2, 0x3fffff ; GFX11-NEXT: s_waitcnt vmcnt(10) -; GFX11-NEXT: v_add_co_u32 v3, s0, v13, v3 -; GFX11-NEXT: v_add_co_ci_u32_e64 v4, s0, v14, v4, s0 +; GFX11-NEXT: v_add_co_u32 v2, s0, v13, v2 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, s0, v14, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(9) ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_co_u32 v3, s0, v9, v3 -; GFX11-NEXT: v_add_co_ci_u32_e64 v4, s0, v10, v4, s0 +; GFX11-NEXT: v_add_co_u32 v2, s0, v9, v2 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, s0, v10, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(6) ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_co_u32 v3, s0, v7, v3 -; GFX11-NEXT: v_add_co_ci_u32_e64 v4, s0, v8, v4, s0 +; GFX11-NEXT: v_add_co_u32 v2, s0, v7, v2 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, s0, v8, v3, s0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_co_u32 v3, s0, v11, v3 -; GFX11-NEXT: v_add_co_ci_u32_e64 v4, s0, v12, v4, s0 +; GFX11-NEXT: v_add_co_u32 v2, s0, v11, v2 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, s0, v12, v3, s0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_co_u32 v3, s0, v19, v3 -; GFX11-NEXT: v_add_co_ci_u32_e64 v4, s0, v20, v4, s0 +; GFX11-NEXT: v_add_co_u32 v2, s0, v19, v2 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, s0, v20, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(5) ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_co_u32 v3, s0, v17, v3 -; GFX11-NEXT: v_add_co_ci_u32_e64 v4, s0, v18, v4, s0 +; GFX11-NEXT: v_add_co_u32 v2, s0, v17, v2 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, s0, v18, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(4) ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_co_u32 v3, s0, v15, v3 -; GFX11-NEXT: v_add_co_ci_u32_e64 v4, s0, v16, v4, s0 +; GFX11-NEXT: v_add_co_u32 v2, s0, v15, v2 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, s0, v16, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(3) ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_co_u32 v3, s0, v21, v3 -; GFX11-NEXT: v_add_co_ci_u32_e64 v4, s0, v22, v4, s0 +; GFX11-NEXT: v_add_co_u32 v2, s0, v21, v2 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, s0, v22, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(2) ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_co_u32 v3, s0, v23, v3 -; GFX11-NEXT: v_add_co_ci_u32_e64 v4, s0, v24, v4, s0 +; GFX11-NEXT: v_add_co_u32 v2, s0, v23, v2 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, s0, v24, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(1) ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_co_u32 v3, s0, v25, v3 -; GFX11-NEXT: v_add_co_ci_u32_e64 v4, s0, v26, v4, s0 +; GFX11-NEXT: v_add_co_u32 v2, s0, v25, v2 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, s0, v26, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, v27, v3 -; GFX11-NEXT: v_add_co_ci_u32_e32 v4, vcc_lo, v28, v4, vcc_lo +; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, 
v27, v2 +; GFX11-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v28, v3, vcc_lo ; GFX11-NEXT: s_cbranch_scc0 .LBB1_2 ; GFX11-NEXT: ; %bb.3: ; %while.cond.loopexit ; GFX11-NEXT: ; in Loop: Header=BB1_1 Depth=1 @@ -1022,10 +1022,10 @@ ; GFX11-NEXT: s_mov_b32 s1, s0 ; GFX11-NEXT: s_branch .LBB1_1 ; GFX11-NEXT: .LBB1_5: ; %while.end -; GFX11-NEXT: v_add_co_u32 v0, s0, s34, v0 +; GFX11-NEXT: v_add_co_u32 v0, s0, s34, v6 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s35, 0, s0 -; GFX11-NEXT: global_store_b64 v[0:1], v[3:4], off +; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off ; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX11-NEXT: s_endpgm entry: diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-partial-reg-uses-gen.mir b/llvm/test/CodeGen/AMDGPU/rewrite-partial-reg-uses-gen.mir new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/rewrite-partial-reg-uses-gen.mir @@ -0,0 +1,5189 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -start-before=rename-independent-subregs -stop-after=rewrite-partial-reg-uses %s -o - | FileCheck -check-prefix=CHECK %s +# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -start-before=rename-independent-subregs %s -o /dev/null 2>&1 +--- +name: test_subregs_composition_vreg_1024 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_subregs_composition_vreg_1024 + ; CHECK: undef %5.sub0:vreg_96 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_96 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5.sub0_sub1 + ; CHECK-NEXT: S_NOP 0, implicit %5.sub1_sub2 + ; CHECK-NEXT: undef %6.sub0:vreg_128 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %6.sub1:vreg_128 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %6.sub0_sub1_sub2 + ; CHECK-NEXT: S_NOP 0, implicit %6.sub1_sub2_sub3 + ; CHECK-NEXT: undef %7.sub0:vreg_160 = V_MOV_B32_e32 21, implicit $exec + ; CHECK-NEXT: %7.sub1:vreg_160 = V_MOV_B32_e32 22, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %7.sub0_sub1_sub2_sub3 + ; CHECK-NEXT: S_NOP 0, implicit %7.sub1_sub2_sub3_sub4 + ; CHECK-NEXT: undef %8.sub0:vreg_192 = V_MOV_B32_e32 31, implicit $exec + ; CHECK-NEXT: %8.sub1:vreg_192 = V_MOV_B32_e32 32, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %8.sub0_sub1_sub2_sub3_sub4 + ; CHECK-NEXT: S_NOP 0, implicit %8.sub1_sub2_sub3_sub4_sub5 + ; CHECK-NEXT: undef %9.sub0:vreg_256 = V_MOV_B32_e32 41, implicit $exec + ; CHECK-NEXT: %9.sub2:vreg_256 = V_MOV_B32_e32 43, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %9.sub0_sub1_sub2_sub3_sub4_sub5 + ; CHECK-NEXT: S_NOP 0, implicit %9.sub2_sub3_sub4_sub5_sub6_sub7 + undef %0.sub1:vreg_1024 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_1024 = V_MOV_B32_e32 02, implicit $exec + S_NOP 0, implicit %0.sub1_sub2 + S_NOP 0, implicit %0.sub2_sub3 + + undef %1.sub1:vreg_1024 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_1024 = V_MOV_B32_e32 12, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3 + S_NOP 0, implicit %1.sub2_sub3_sub4 + + undef %2.sub1:vreg_1024 = V_MOV_B32_e32 21, implicit $exec + %2.sub2:vreg_1024 = V_MOV_B32_e32 22, implicit $exec + S_NOP 0, implicit %2.sub1_sub2_sub3_sub4 + S_NOP 0, implicit %2.sub2_sub3_sub4_sub5 + + undef %3.sub1:vreg_1024 = V_MOV_B32_e32 31, implicit $exec + %3.sub2:vreg_1024 = V_MOV_B32_e32 32, implicit $exec + S_NOP 0, implicit %3.sub1_sub2_sub3_sub4_sub5 + S_NOP 0, implicit %3.sub2_sub3_sub4_sub5_sub6 + + undef 
%4.sub1:vreg_1024 = V_MOV_B32_e32 41, implicit $exec + %4.sub3:vreg_1024 = V_MOV_B32_e32 43, implicit $exec + S_NOP 0, implicit %4.sub1_sub2_sub3_sub4_sub5_sub6 + S_NOP 0, implicit %4.sub3_sub4_sub5_sub6_sub7_sub8 + +... +--- +name: test_vreg_64_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_64_w32 + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_]] + ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]] + undef %0.sub0:vreg_64 = V_MOV_B32_e32 00, implicit $exec + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:vreg_64 = V_MOV_B32_e32 11, implicit $exec + S_NOP 0, implicit %1.sub1 + +... +--- +name: test_vreg_96_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_96_w32 + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_]] + ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]] + ; CHECK-NEXT: [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 22, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_2]] + undef %0.sub0:vreg_96 = V_MOV_B32_e32 00, implicit $exec + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:vreg_96 = V_MOV_B32_e32 11, implicit $exec + S_NOP 0, implicit %1.sub1 + + undef %2.sub2:vreg_96 = V_MOV_B32_e32 22, implicit $exec + S_NOP 0, implicit %2.sub2 + +... +--- +name: test_vreg_96_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_96_w64 + ; CHECK: undef %2.sub0:vreg_64 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %2.sub1:vreg_64 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %2 + ; CHECK-NEXT: undef %3.sub0:vreg_64 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_64 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + undef %0.sub0:vreg_96 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_96 = V_MOV_B32_e32 01, implicit $exec + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub1:vreg_96 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_96 = V_MOV_B32_e32 12, implicit $exec + S_NOP 0, implicit %1.sub1_sub2 + +... +--- +name: test_vreg_128_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_128_w32 + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_]] + ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]] + ; CHECK-NEXT: [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 23, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_2]] + undef %0.sub0:vreg_128 = V_MOV_B32_e32 00, implicit $exec + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:vreg_128 = V_MOV_B32_e32 11, implicit $exec + S_NOP 0, implicit %1.sub1 + + undef %2.sub3:vreg_128 = V_MOV_B32_e32 23, implicit $exec + S_NOP 0, implicit %2.sub3 + +... 
+--- +name: test_vreg_128_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_128_w64 + ; CHECK: undef %3.sub0:vreg_64 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_64 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_64 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_64 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_64 = V_MOV_B32_e32 22, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_64 = V_MOV_B32_e32 23, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_128 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_128 = V_MOV_B32_e32 01, implicit $exec + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub1:vreg_128 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_128 = V_MOV_B32_e32 12, implicit $exec + S_NOP 0, implicit %1.sub1_sub2 + + undef %2.sub2:vreg_128 = V_MOV_B32_e32 22, implicit $exec + %2.sub3:vreg_128 = V_MOV_B32_e32 23, implicit $exec + S_NOP 0, implicit %2.sub2_sub3 + +... +--- +name: test_vreg_128_w96 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_128_w96 + ; CHECK: undef %2.sub0:vreg_96 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %2.sub1:vreg_96 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %2.sub2:vreg_96 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %2 + ; CHECK-NEXT: undef %3.sub0:vreg_96 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_96 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_96 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + undef %0.sub0:vreg_128 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_128 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_128 = V_MOV_B32_e32 02, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2 + + undef %1.sub1:vreg_128 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_128 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_128 = V_MOV_B32_e32 13, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3 + +... +--- +name: test_vreg_160_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_160_w32 + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_]] + ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]] + ; CHECK-NEXT: [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_2]] + undef %0.sub0:vreg_160 = V_MOV_B32_e32 00, implicit $exec + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:vreg_160 = V_MOV_B32_e32 11, implicit $exec + S_NOP 0, implicit %1.sub1 + + undef %2.sub4:vreg_160 = V_MOV_B32_e32 24, implicit $exec + S_NOP 0, implicit %2.sub4 + +... 
+--- +name: test_vreg_160_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_160_w64 + ; CHECK: undef %3.sub0:vreg_64 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_64 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_64 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_64 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_64 = V_MOV_B32_e32 23, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_64 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_160 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_160 = V_MOV_B32_e32 01, implicit $exec + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub1:vreg_160 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_160 = V_MOV_B32_e32 12, implicit $exec + S_NOP 0, implicit %1.sub1_sub2 + + undef %2.sub3:vreg_160 = V_MOV_B32_e32 23, implicit $exec + %2.sub4:vreg_160 = V_MOV_B32_e32 24, implicit $exec + S_NOP 0, implicit %2.sub3_sub4 + +... +--- +name: test_vreg_160_w96 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_160_w96 + ; CHECK: undef %3.sub0:vreg_96 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_96 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_96 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_96 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_96 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_96 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_96 = V_MOV_B32_e32 22, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_96 = V_MOV_B32_e32 23, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_96 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_160 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_160 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_160 = V_MOV_B32_e32 02, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2 + + undef %1.sub1:vreg_160 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_160 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_160 = V_MOV_B32_e32 13, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3 + + undef %2.sub2:vreg_160 = V_MOV_B32_e32 22, implicit $exec + %2.sub3:vreg_160 = V_MOV_B32_e32 23, implicit $exec + %2.sub4:vreg_160 = V_MOV_B32_e32 24, implicit $exec + S_NOP 0, implicit %2.sub2_sub3_sub4 + +... 
+--- +name: test_vreg_160_w128 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_160_w128 + ; CHECK: undef %2.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %2.sub1:vreg_128 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %2.sub2:vreg_128 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %2.sub3:vreg_128 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %2 + ; CHECK-NEXT: undef %3.sub0:vreg_128 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_128 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_128 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_128 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + undef %0.sub0:vreg_160 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_160 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_160 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_160 = V_MOV_B32_e32 03, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3 + + undef %1.sub1:vreg_160 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_160 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_160 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_160 = V_MOV_B32_e32 14, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4 + +... +--- +name: test_vreg_192_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_192_w32 + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_]] + ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]] + ; CHECK-NEXT: [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_2]] + undef %0.sub0:vreg_192 = V_MOV_B32_e32 00, implicit $exec + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:vreg_192 = V_MOV_B32_e32 11, implicit $exec + S_NOP 0, implicit %1.sub1 + + undef %2.sub5:vreg_192 = V_MOV_B32_e32 25, implicit $exec + S_NOP 0, implicit %2.sub5 + +... +--- +name: test_vreg_192_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_192_w64 + ; CHECK: undef %3.sub0:vreg_64 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_64 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_64 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_64 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_64 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_64 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_192 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_192 = V_MOV_B32_e32 01, implicit $exec + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub1:vreg_192 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_192 = V_MOV_B32_e32 12, implicit $exec + S_NOP 0, implicit %1.sub1_sub2 + + undef %2.sub4:vreg_192 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_192 = V_MOV_B32_e32 25, implicit $exec + S_NOP 0, implicit %2.sub4_sub5 + +... 
+--- +name: test_vreg_192_w96 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_192_w96 + ; CHECK: undef %3.sub0:vreg_96 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_96 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_96 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_96 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_96 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_96 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_96 = V_MOV_B32_e32 23, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_96 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_96 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_192 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_192 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_192 = V_MOV_B32_e32 02, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2 + + undef %1.sub1:vreg_192 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_192 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_192 = V_MOV_B32_e32 13, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3 + + undef %2.sub3:vreg_192 = V_MOV_B32_e32 23, implicit $exec + %2.sub4:vreg_192 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_192 = V_MOV_B32_e32 25, implicit $exec + S_NOP 0, implicit %2.sub3_sub4_sub5 + +... +--- +name: test_vreg_192_w128 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_192_w128 + ; CHECK: undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_128 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_128 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_128 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_128 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_128 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_128 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_128 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_128 = V_MOV_B32_e32 22, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_128 = V_MOV_B32_e32 23, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_128 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_128 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_192 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_192 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_192 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_192 = V_MOV_B32_e32 03, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3 + + undef %1.sub1:vreg_192 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_192 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_192 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_192 = V_MOV_B32_e32 14, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4 + + undef %2.sub2:vreg_192 = V_MOV_B32_e32 22, implicit $exec + %2.sub3:vreg_192 = V_MOV_B32_e32 23, implicit $exec + %2.sub4:vreg_192 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_192 = V_MOV_B32_e32 25, implicit $exec + S_NOP 0, implicit %2.sub2_sub3_sub4_sub5 + +... 
+--- +name: test_vreg_192_w160 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_192_w160 + ; CHECK: undef %2.sub0:vreg_160 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %2.sub1:vreg_160 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %2.sub2:vreg_160 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %2.sub3:vreg_160 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %2.sub4:vreg_160 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %2 + ; CHECK-NEXT: undef %3.sub0:vreg_160 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_160 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_160 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_160 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_160 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + undef %0.sub0:vreg_192 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_192 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_192 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_192 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_192 = V_MOV_B32_e32 04, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4 + + undef %1.sub1:vreg_192 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_192 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_192 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_192 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_192 = V_MOV_B32_e32 15, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5 + +... +--- +name: test_vreg_224_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_224_w32 + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_]] + ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]] + ; CHECK-NEXT: [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_2]] + undef %0.sub0:vreg_224 = V_MOV_B32_e32 00, implicit $exec + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:vreg_224 = V_MOV_B32_e32 11, implicit $exec + S_NOP 0, implicit %1.sub1 + + undef %2.sub6:vreg_224 = V_MOV_B32_e32 26, implicit $exec + S_NOP 0, implicit %2.sub6 + +... +--- +name: test_vreg_224_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_224_w64 + ; CHECK: undef %3.sub0:vreg_64 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_64 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_64 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_64 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_64 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_64 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_224 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_224 = V_MOV_B32_e32 01, implicit $exec + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub1:vreg_224 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_224 = V_MOV_B32_e32 12, implicit $exec + S_NOP 0, implicit %1.sub1_sub2 + + undef %2.sub5:vreg_224 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_224 = V_MOV_B32_e32 26, implicit $exec + S_NOP 0, implicit %2.sub5_sub6 + +... 
+--- +name: test_vreg_224_w96 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_224_w96 + ; CHECK: undef %3.sub0:vreg_96 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_96 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_96 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_96 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_96 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_96 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_96 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_96 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_96 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_224 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_224 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_224 = V_MOV_B32_e32 02, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2 + + undef %1.sub1:vreg_224 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_224 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_224 = V_MOV_B32_e32 13, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3 + + undef %2.sub4:vreg_224 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_224 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_224 = V_MOV_B32_e32 26, implicit $exec + S_NOP 0, implicit %2.sub4_sub5_sub6 + +... +--- +name: test_vreg_224_w128 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_224_w128 + ; CHECK: undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_128 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_128 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_128 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_128 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_128 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_128 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_128 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_128 = V_MOV_B32_e32 23, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_128 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_128 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_128 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_224 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_224 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_224 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_224 = V_MOV_B32_e32 03, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3 + + undef %1.sub1:vreg_224 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_224 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_224 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_224 = V_MOV_B32_e32 14, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4 + + undef %2.sub3:vreg_224 = V_MOV_B32_e32 23, implicit $exec + %2.sub4:vreg_224 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_224 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_224 = V_MOV_B32_e32 26, implicit $exec + S_NOP 0, implicit %2.sub3_sub4_sub5_sub6 + +... 
+--- +name: test_vreg_224_w160 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_224_w160 + ; CHECK: undef %3.sub0:vreg_160 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_160 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_160 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_160 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_160 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_160 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_160 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_160 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_160 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_160 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_160 = V_MOV_B32_e32 22, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_160 = V_MOV_B32_e32 23, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_160 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_160 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_160 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_224 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_224 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_224 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_224 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_224 = V_MOV_B32_e32 04, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4 + + undef %1.sub1:vreg_224 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_224 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_224 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_224 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_224 = V_MOV_B32_e32 15, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5 + + undef %2.sub2:vreg_224 = V_MOV_B32_e32 22, implicit $exec + %2.sub3:vreg_224 = V_MOV_B32_e32 23, implicit $exec + %2.sub4:vreg_224 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_224 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_224 = V_MOV_B32_e32 26, implicit $exec + S_NOP 0, implicit %2.sub2_sub3_sub4_sub5_sub6 + +... 
+--- +name: test_vreg_224_w192 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_224_w192 + ; CHECK: undef %2.sub0:vreg_192 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %2.sub1:vreg_192 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %2.sub2:vreg_192 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %2.sub3:vreg_192 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %2.sub4:vreg_192 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %2.sub5:vreg_192 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %2 + ; CHECK-NEXT: undef %3.sub0:vreg_192 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_192 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_192 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_192 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_192 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_192 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + undef %0.sub0:vreg_224 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_224 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_224 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_224 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_224 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_224 = V_MOV_B32_e32 05, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5 + + undef %1.sub1:vreg_224 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_224 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_224 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_224 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_224 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_224 = V_MOV_B32_e32 16, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5_sub6 + +... +--- +name: test_vreg_256_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_256_w32 + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_]] + ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]] + ; CHECK-NEXT: [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_2]] + undef %0.sub0:vreg_256 = V_MOV_B32_e32 00, implicit $exec + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:vreg_256 = V_MOV_B32_e32 11, implicit $exec + S_NOP 0, implicit %1.sub1 + + undef %2.sub7:vreg_256 = V_MOV_B32_e32 27, implicit $exec + S_NOP 0, implicit %2.sub7 + +... 
+--- +name: test_vreg_256_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_256_w64 + ; CHECK: undef %3.sub0:vreg_64 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_64 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_64 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_64 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_64 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_64 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_256 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_256 = V_MOV_B32_e32 01, implicit $exec + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub1:vreg_256 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_256 = V_MOV_B32_e32 12, implicit $exec + S_NOP 0, implicit %1.sub1_sub2 + + undef %2.sub6:vreg_256 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_256 = V_MOV_B32_e32 27, implicit $exec + S_NOP 0, implicit %2.sub6_sub7 + +... +--- +name: test_vreg_256_w96 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_256_w96 + ; CHECK: undef %3.sub0:vreg_96 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_96 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_96 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_96 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_96 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_96 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_96 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_96 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_96 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_256 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_256 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_256 = V_MOV_B32_e32 02, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2 + + undef %1.sub1:vreg_256 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_256 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_256 = V_MOV_B32_e32 13, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3 + + undef %2.sub5:vreg_256 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_256 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_256 = V_MOV_B32_e32 27, implicit $exec + S_NOP 0, implicit %2.sub5_sub6_sub7 + +... 
+--- +name: test_vreg_256_w128 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_256_w128 + ; CHECK: undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_128 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_128 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_128 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_128 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_128 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_128 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_128 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_128 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_128 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_128 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_128 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_256 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_256 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_256 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_256 = V_MOV_B32_e32 03, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3 + + undef %1.sub1:vreg_256 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_256 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_256 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_256 = V_MOV_B32_e32 14, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4 + + undef %2.sub4:vreg_256 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_256 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_256 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_256 = V_MOV_B32_e32 27, implicit $exec + S_NOP 0, implicit %2.sub4_sub5_sub6_sub7 + +... 
+--- +name: test_vreg_256_w160 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_256_w160 + ; CHECK: undef %3.sub0:vreg_160 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_160 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_160 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_160 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_160 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_160 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_160 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_160 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_160 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_160 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_160 = V_MOV_B32_e32 23, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_160 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_160 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_160 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_160 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_256 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_256 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_256 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_256 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_256 = V_MOV_B32_e32 04, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4 + + undef %1.sub1:vreg_256 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_256 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_256 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_256 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_256 = V_MOV_B32_e32 15, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5 + + undef %2.sub3:vreg_256 = V_MOV_B32_e32 23, implicit $exec + %2.sub4:vreg_256 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_256 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_256 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_256 = V_MOV_B32_e32 27, implicit $exec + S_NOP 0, implicit %2.sub3_sub4_sub5_sub6_sub7 + +... 
+--- +name: test_vreg_256_w192 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_256_w192 + ; CHECK: undef %3.sub0:vreg_192 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_192 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_192 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_192 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_192 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_192 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_192 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_192 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_192 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_192 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_192 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %4.sub5:vreg_192 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_192 = V_MOV_B32_e32 22, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_192 = V_MOV_B32_e32 23, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_192 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_192 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_192 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub5:vreg_192 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_256 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_256 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_256 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_256 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_256 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_256 = V_MOV_B32_e32 05, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5 + + undef %1.sub1:vreg_256 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_256 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_256 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_256 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_256 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_256 = V_MOV_B32_e32 16, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5_sub6 + + undef %2.sub2:vreg_256 = V_MOV_B32_e32 22, implicit $exec + %2.sub3:vreg_256 = V_MOV_B32_e32 23, implicit $exec + %2.sub4:vreg_256 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_256 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_256 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_256 = V_MOV_B32_e32 27, implicit $exec + S_NOP 0, implicit %2.sub2_sub3_sub4_sub5_sub6_sub7 + +... +--- +name: test_vreg_288_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_288_w32 + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_]] + ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]] + ; CHECK-NEXT: [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_2]] + undef %0.sub0:vreg_288 = V_MOV_B32_e32 00, implicit $exec + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:vreg_288 = V_MOV_B32_e32 11, implicit $exec + S_NOP 0, implicit %1.sub1 + + undef %2.sub8:vreg_288 = V_MOV_B32_e32 28, implicit $exec + S_NOP 0, implicit %2.sub8 + +... 
+--- +name: test_vreg_288_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_288_w64 + ; CHECK: undef %3.sub0:vreg_64 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_64 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_64 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_64 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_64 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_64 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_288 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_288 = V_MOV_B32_e32 01, implicit $exec + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub1:vreg_288 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_288 = V_MOV_B32_e32 12, implicit $exec + S_NOP 0, implicit %1.sub1_sub2 + + undef %2.sub7:vreg_288 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_288 = V_MOV_B32_e32 28, implicit $exec + S_NOP 0, implicit %2.sub7_sub8 + +... +--- +name: test_vreg_288_w96 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_288_w96 + ; CHECK: undef %3.sub0:vreg_96 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_96 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_96 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_96 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_96 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_96 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_96 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_96 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_96 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_288 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_288 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_288 = V_MOV_B32_e32 02, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2 + + undef %1.sub1:vreg_288 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_288 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_288 = V_MOV_B32_e32 13, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3 + + undef %2.sub6:vreg_288 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_288 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_288 = V_MOV_B32_e32 28, implicit $exec + S_NOP 0, implicit %2.sub6_sub7_sub8 + +... 
+--- +name: test_vreg_288_w128 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_288_w128 + ; CHECK: undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_128 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_128 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_128 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_128 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_128 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_128 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_128 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_128 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_128 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_128 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_128 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_288 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_288 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_288 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_288 = V_MOV_B32_e32 03, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3 + + undef %1.sub1:vreg_288 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_288 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_288 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_288 = V_MOV_B32_e32 14, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4 + + undef %2.sub5:vreg_288 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_288 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_288 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_288 = V_MOV_B32_e32 28, implicit $exec + S_NOP 0, implicit %2.sub5_sub6_sub7_sub8 + +... 
+--- +name: test_vreg_288_w160 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_288_w160 + ; CHECK: undef %3.sub0:vreg_160 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_160 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_160 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_160 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_160 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_160 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_160 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_160 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_160 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_160 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_160 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_160 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_160 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_160 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_160 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_288 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_288 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_288 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_288 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_288 = V_MOV_B32_e32 04, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4 + + undef %1.sub1:vreg_288 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_288 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_288 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_288 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_288 = V_MOV_B32_e32 15, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5 + + undef %2.sub4:vreg_288 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_288 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_288 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_288 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_288 = V_MOV_B32_e32 28, implicit $exec + S_NOP 0, implicit %2.sub4_sub5_sub6_sub7_sub8 + +... 
+--- +name: test_vreg_288_w192 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_288_w192 + ; CHECK: undef %3.sub0:vreg_192 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_192 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_192 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_192 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_192 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_192 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_192 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_192 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_192 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_192 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_192 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %4.sub5:vreg_192 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_192 = V_MOV_B32_e32 23, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_192 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_192 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_192 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_192 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub5:vreg_192 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_288 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_288 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_288 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_288 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_288 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_288 = V_MOV_B32_e32 05, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5 + + undef %1.sub1:vreg_288 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_288 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_288 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_288 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_288 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_288 = V_MOV_B32_e32 16, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5_sub6 + + undef %2.sub3:vreg_288 = V_MOV_B32_e32 23, implicit $exec + %2.sub4:vreg_288 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_288 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_288 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_288 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_288 = V_MOV_B32_e32 28, implicit $exec + S_NOP 0, implicit %2.sub3_sub4_sub5_sub6_sub7_sub8 + +... 
+--- +name: test_vreg_288_w256 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_288_w256 + ; CHECK: undef %2.sub0:vreg_256 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %2.sub1:vreg_256 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %2.sub2:vreg_256 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %2.sub3:vreg_256 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %2.sub4:vreg_256 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %2.sub5:vreg_256 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: %2.sub6:vreg_256 = V_MOV_B32_e32 6, implicit $exec + ; CHECK-NEXT: %2.sub7:vreg_256 = V_MOV_B32_e32 7, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %2 + ; CHECK-NEXT: undef %3.sub0:vreg_256 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_256 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_256 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_256 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_256 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_256 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: %3.sub6:vreg_256 = V_MOV_B32_e32 17, implicit $exec + ; CHECK-NEXT: %3.sub7:vreg_256 = V_MOV_B32_e32 18, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + undef %0.sub0:vreg_288 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_288 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_288 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_288 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_288 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_288 = V_MOV_B32_e32 05, implicit $exec + %0.sub6:vreg_288 = V_MOV_B32_e32 06, implicit $exec + %0.sub7:vreg_288 = V_MOV_B32_e32 07, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7 + + undef %1.sub1:vreg_288 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_288 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_288 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_288 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_288 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_288 = V_MOV_B32_e32 16, implicit $exec + %1.sub7:vreg_288 = V_MOV_B32_e32 17, implicit $exec + %1.sub8:vreg_288 = V_MOV_B32_e32 18, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8 + +... +--- +name: test_vreg_320_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_320_w32 + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_]] + ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]] + ; CHECK-NEXT: [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_2]] + undef %0.sub0:vreg_320 = V_MOV_B32_e32 00, implicit $exec + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:vreg_320 = V_MOV_B32_e32 11, implicit $exec + S_NOP 0, implicit %1.sub1 + + undef %2.sub9:vreg_320 = V_MOV_B32_e32 29, implicit $exec + S_NOP 0, implicit %2.sub9 + +... 
+--- +name: test_vreg_320_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_320_w64 + ; CHECK: undef %3.sub0:vreg_64 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_64 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_64 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_64 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_64 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_64 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_320 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_320 = V_MOV_B32_e32 01, implicit $exec + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub1:vreg_320 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_320 = V_MOV_B32_e32 12, implicit $exec + S_NOP 0, implicit %1.sub1_sub2 + + undef %2.sub8:vreg_320 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_320 = V_MOV_B32_e32 29, implicit $exec + S_NOP 0, implicit %2.sub8_sub9 + +... +--- +name: test_vreg_320_w96 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_320_w96 + ; CHECK: undef %3.sub0:vreg_96 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_96 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_96 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_96 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_96 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_96 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_96 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_96 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_96 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_320 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_320 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_320 = V_MOV_B32_e32 02, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2 + + undef %1.sub1:vreg_320 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_320 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_320 = V_MOV_B32_e32 13, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3 + + undef %2.sub7:vreg_320 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_320 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_320 = V_MOV_B32_e32 29, implicit $exec + S_NOP 0, implicit %2.sub7_sub8_sub9 + +... 
+--- +name: test_vreg_320_w128 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_320_w128 + ; CHECK: undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_128 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_128 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_128 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_128 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_128 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_128 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_128 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_128 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_128 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_128 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_128 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_320 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_320 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_320 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_320 = V_MOV_B32_e32 03, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3 + + undef %1.sub1:vreg_320 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_320 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_320 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_320 = V_MOV_B32_e32 14, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4 + + undef %2.sub6:vreg_320 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_320 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_320 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_320 = V_MOV_B32_e32 29, implicit $exec + S_NOP 0, implicit %2.sub6_sub7_sub8_sub9 + +... 
+--- +name: test_vreg_320_w160 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_320_w160 + ; CHECK: undef %3.sub0:vreg_160 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_160 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_160 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_160 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_160 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_160 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_160 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_160 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_160 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_160 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_160 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_160 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_160 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_160 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_160 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_320 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_320 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_320 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_320 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_320 = V_MOV_B32_e32 04, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4 + + undef %1.sub1:vreg_320 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_320 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_320 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_320 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_320 = V_MOV_B32_e32 15, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5 + + undef %2.sub5:vreg_320 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_320 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_320 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_320 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_320 = V_MOV_B32_e32 29, implicit $exec + S_NOP 0, implicit %2.sub5_sub6_sub7_sub8_sub9 + +... 
+--- +name: test_vreg_320_w192 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_320_w192 + ; CHECK: undef %3.sub0:vreg_192 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_192 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_192 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_192 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_192 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_192 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_192 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_192 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_192 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_192 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_192 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %4.sub5:vreg_192 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_192 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_192 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_192 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_192 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_192 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub5:vreg_192 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_320 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_320 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_320 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_320 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_320 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_320 = V_MOV_B32_e32 05, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5 + + undef %1.sub1:vreg_320 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_320 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_320 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_320 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_320 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_320 = V_MOV_B32_e32 16, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5_sub6 + + undef %2.sub4:vreg_320 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_320 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_320 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_320 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_320 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_320 = V_MOV_B32_e32 29, implicit $exec + S_NOP 0, implicit %2.sub4_sub5_sub6_sub7_sub8_sub9 + +... 
+--- +name: test_vreg_320_w256 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_320_w256 + ; CHECK: undef %3.sub0:vreg_256 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_256 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_256 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_256 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_256 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_256 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: %3.sub6:vreg_256 = V_MOV_B32_e32 6, implicit $exec + ; CHECK-NEXT: %3.sub7:vreg_256 = V_MOV_B32_e32 7, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_256 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_256 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_256 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_256 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_256 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %4.sub5:vreg_256 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: %4.sub6:vreg_256 = V_MOV_B32_e32 17, implicit $exec + ; CHECK-NEXT: %4.sub7:vreg_256 = V_MOV_B32_e32 18, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_256 = V_MOV_B32_e32 22, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_256 = V_MOV_B32_e32 23, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_256 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_256 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_256 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub5:vreg_256 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub6:vreg_256 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub7:vreg_256 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_320 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_320 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_320 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_320 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_320 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_320 = V_MOV_B32_e32 05, implicit $exec + %0.sub6:vreg_320 = V_MOV_B32_e32 06, implicit $exec + %0.sub7:vreg_320 = V_MOV_B32_e32 07, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7 + + undef %1.sub1:vreg_320 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_320 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_320 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_320 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_320 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_320 = V_MOV_B32_e32 16, implicit $exec + %1.sub7:vreg_320 = V_MOV_B32_e32 17, implicit $exec + %1.sub8:vreg_320 = V_MOV_B32_e32 18, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8 + + undef %2.sub2:vreg_320 = V_MOV_B32_e32 22, implicit $exec + %2.sub3:vreg_320 = V_MOV_B32_e32 23, implicit $exec + %2.sub4:vreg_320 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_320 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_320 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_320 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_320 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_320 = V_MOV_B32_e32 29, implicit $exec + S_NOP 0, implicit %2.sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9 + +... 
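+# The V_MOV_B32 immediates in these tests encode the def's virtual register
+# number followed by the subreg index (for example, %2.sub10 is written with
+# the value 210), so the three-digit values below are intentional.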
+--- +name: test_vreg_352_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_352_w32 + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_]] + ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]] + ; CHECK-NEXT: [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_2]] + undef %0.sub0:vreg_352 = V_MOV_B32_e32 00, implicit $exec + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:vreg_352 = V_MOV_B32_e32 11, implicit $exec + S_NOP 0, implicit %1.sub1 + + undef %2.sub10:vreg_352 = V_MOV_B32_e32 210, implicit $exec + S_NOP 0, implicit %2.sub10 + +... +--- +name: test_vreg_352_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_352_w64 + ; CHECK: undef %3.sub0:vreg_64 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_64 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_64 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_64 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_64 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_64 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_352 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_352 = V_MOV_B32_e32 01, implicit $exec + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub1:vreg_352 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_352 = V_MOV_B32_e32 12, implicit $exec + S_NOP 0, implicit %1.sub1_sub2 + + undef %2.sub9:vreg_352 = V_MOV_B32_e32 29, implicit $exec + %2.sub10:vreg_352 = V_MOV_B32_e32 210, implicit $exec + S_NOP 0, implicit %2.sub9_sub10 + +... +--- +name: test_vreg_352_w96 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_352_w96 + ; CHECK: undef %3.sub0:vreg_96 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_96 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_96 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_96 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_96 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_96 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_96 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_96 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_96 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_352 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_352 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_352 = V_MOV_B32_e32 02, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2 + + undef %1.sub1:vreg_352 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_352 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_352 = V_MOV_B32_e32 13, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3 + + undef %2.sub8:vreg_352 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_352 = V_MOV_B32_e32 29, implicit $exec + %2.sub10:vreg_352 = V_MOV_B32_e32 210, implicit $exec + S_NOP 0, implicit %2.sub8_sub9_sub10 + +... 
+--- +name: test_vreg_352_w128 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_352_w128 + ; CHECK: undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_128 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_128 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_128 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_128 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_128 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_128 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_128 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_128 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_128 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_128 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_128 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_352 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_352 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_352 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_352 = V_MOV_B32_e32 03, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3 + + undef %1.sub1:vreg_352 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_352 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_352 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_352 = V_MOV_B32_e32 14, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4 + + undef %2.sub7:vreg_352 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_352 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_352 = V_MOV_B32_e32 29, implicit $exec + %2.sub10:vreg_352 = V_MOV_B32_e32 210, implicit $exec + S_NOP 0, implicit %2.sub7_sub8_sub9_sub10 + +... 
+--- +name: test_vreg_352_w160 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_352_w160 + ; CHECK: undef %3.sub0:vreg_160 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_160 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_160 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_160 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_160 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_160 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_160 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_160 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_160 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_160 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_160 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_160 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_160 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_160 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_160 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_352 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_352 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_352 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_352 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_352 = V_MOV_B32_e32 04, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4 + + undef %1.sub1:vreg_352 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_352 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_352 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_352 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_352 = V_MOV_B32_e32 15, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5 + + undef %2.sub6:vreg_352 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_352 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_352 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_352 = V_MOV_B32_e32 29, implicit $exec + %2.sub10:vreg_352 = V_MOV_B32_e32 210, implicit $exec + S_NOP 0, implicit %2.sub6_sub7_sub8_sub9_sub10 + +... 
+--- +name: test_vreg_352_w192 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_352_w192 + ; CHECK: undef %3.sub0:vreg_192 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_192 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_192 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_192 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_192 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_192 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_192 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_192 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_192 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_192 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_192 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %4.sub5:vreg_192 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_192 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_192 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_192 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_192 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_192 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: %5.sub5:vreg_192 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_352 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_352 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_352 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_352 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_352 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_352 = V_MOV_B32_e32 05, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5 + + undef %1.sub1:vreg_352 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_352 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_352 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_352 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_352 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_352 = V_MOV_B32_e32 16, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5_sub6 + + undef %2.sub5:vreg_352 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_352 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_352 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_352 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_352 = V_MOV_B32_e32 29, implicit $exec + %2.sub10:vreg_352 = V_MOV_B32_e32 210, implicit $exec + S_NOP 0, implicit %2.sub5_sub6_sub7_sub8_sub9_sub10 + +... 
+--- +name: test_vreg_352_w256 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_352_w256 + ; CHECK: undef %3.sub0:vreg_256 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_256 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_256 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_256 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_256 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_256 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: %3.sub6:vreg_256 = V_MOV_B32_e32 6, implicit $exec + ; CHECK-NEXT: %3.sub7:vreg_256 = V_MOV_B32_e32 7, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_256 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_256 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_256 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_256 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_256 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %4.sub5:vreg_256 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: %4.sub6:vreg_256 = V_MOV_B32_e32 17, implicit $exec + ; CHECK-NEXT: %4.sub7:vreg_256 = V_MOV_B32_e32 18, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_256 = V_MOV_B32_e32 23, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_256 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_256 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_256 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_256 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub5:vreg_256 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub6:vreg_256 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: %5.sub7:vreg_256 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_352 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_352 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_352 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_352 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_352 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_352 = V_MOV_B32_e32 05, implicit $exec + %0.sub6:vreg_352 = V_MOV_B32_e32 06, implicit $exec + %0.sub7:vreg_352 = V_MOV_B32_e32 07, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7 + + undef %1.sub1:vreg_352 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_352 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_352 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_352 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_352 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_352 = V_MOV_B32_e32 16, implicit $exec + %1.sub7:vreg_352 = V_MOV_B32_e32 17, implicit $exec + %1.sub8:vreg_352 = V_MOV_B32_e32 18, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8 + + undef %2.sub3:vreg_352 = V_MOV_B32_e32 23, implicit $exec + %2.sub4:vreg_352 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_352 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_352 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_352 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_352 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_352 = V_MOV_B32_e32 29, implicit $exec + %2.sub10:vreg_352 = V_MOV_B32_e32 210, implicit $exec + S_NOP 0, implicit %2.sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10 + +... 
+--- +name: test_vreg_384_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_384_w32 + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_]] + ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]] + ; CHECK-NEXT: [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 211, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_2]] + undef %0.sub0:vreg_384 = V_MOV_B32_e32 00, implicit $exec + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:vreg_384 = V_MOV_B32_e32 11, implicit $exec + S_NOP 0, implicit %1.sub1 + + undef %2.sub11:vreg_384 = V_MOV_B32_e32 211, implicit $exec + S_NOP 0, implicit %2.sub11 + +... +--- +name: test_vreg_384_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_384_w64 + ; CHECK: undef %3.sub0:vreg_64 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_64 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_64 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_64 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_64 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_64 = V_MOV_B32_e32 211, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_384 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_384 = V_MOV_B32_e32 01, implicit $exec + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub1:vreg_384 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_384 = V_MOV_B32_e32 12, implicit $exec + S_NOP 0, implicit %1.sub1_sub2 + + undef %2.sub10:vreg_384 = V_MOV_B32_e32 210, implicit $exec + %2.sub11:vreg_384 = V_MOV_B32_e32 211, implicit $exec + S_NOP 0, implicit %2.sub10_sub11 + +... +--- +name: test_vreg_384_w96 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_384_w96 + ; CHECK: undef %3.sub0:vreg_96 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_96 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_96 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_96 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_96 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_96 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_96 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_96 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_96 = V_MOV_B32_e32 211, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_384 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_384 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_384 = V_MOV_B32_e32 02, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2 + + undef %1.sub1:vreg_384 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_384 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_384 = V_MOV_B32_e32 13, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3 + + undef %2.sub9:vreg_384 = V_MOV_B32_e32 29, implicit $exec + %2.sub10:vreg_384 = V_MOV_B32_e32 210, implicit $exec + %2.sub11:vreg_384 = V_MOV_B32_e32 211, implicit $exec + S_NOP 0, implicit %2.sub9_sub10_sub11 + +... 
+--- +name: test_vreg_384_w128 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_384_w128 + ; CHECK: undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_128 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_128 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_128 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_128 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_128 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_128 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_128 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_128 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_128 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_128 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_128 = V_MOV_B32_e32 211, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_384 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_384 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_384 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_384 = V_MOV_B32_e32 03, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3 + + undef %1.sub1:vreg_384 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_384 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_384 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_384 = V_MOV_B32_e32 14, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4 + + undef %2.sub8:vreg_384 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_384 = V_MOV_B32_e32 29, implicit $exec + %2.sub10:vreg_384 = V_MOV_B32_e32 210, implicit $exec + %2.sub11:vreg_384 = V_MOV_B32_e32 211, implicit $exec + S_NOP 0, implicit %2.sub8_sub9_sub10_sub11 + +... 
+--- +name: test_vreg_384_w160 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_384_w160 + ; CHECK: undef %3.sub0:vreg_160 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_160 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_160 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_160 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_160 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_160 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_160 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_160 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_160 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_160 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_160 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_160 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_160 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_160 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_160 = V_MOV_B32_e32 211, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_384 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_384 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_384 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_384 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_384 = V_MOV_B32_e32 04, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4 + + undef %1.sub1:vreg_384 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_384 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_384 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_384 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_384 = V_MOV_B32_e32 15, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5 + + undef %2.sub7:vreg_384 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_384 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_384 = V_MOV_B32_e32 29, implicit $exec + %2.sub10:vreg_384 = V_MOV_B32_e32 210, implicit $exec + %2.sub11:vreg_384 = V_MOV_B32_e32 211, implicit $exec + S_NOP 0, implicit %2.sub7_sub8_sub9_sub10_sub11 + +... 
+--- +name: test_vreg_384_w192 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_384_w192 + ; CHECK: undef %3.sub0:vreg_192 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_192 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_192 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_192 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_192 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_192 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_192 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_192 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_192 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_192 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_192 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %4.sub5:vreg_192 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_192 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_192 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_192 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_192 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_192 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: %5.sub5:vreg_192 = V_MOV_B32_e32 211, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_384 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_384 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_384 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_384 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_384 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_384 = V_MOV_B32_e32 05, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5 + + undef %1.sub1:vreg_384 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_384 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_384 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_384 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_384 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_384 = V_MOV_B32_e32 16, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5_sub6 + + undef %2.sub6:vreg_384 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_384 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_384 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_384 = V_MOV_B32_e32 29, implicit $exec + %2.sub10:vreg_384 = V_MOV_B32_e32 210, implicit $exec + %2.sub11:vreg_384 = V_MOV_B32_e32 211, implicit $exec + S_NOP 0, implicit %2.sub6_sub7_sub8_sub9_sub10_sub11 + +... 
+--- +name: test_vreg_384_w256 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_384_w256 + ; CHECK: undef %3.sub0:vreg_256 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_256 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_256 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_256 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_256 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_256 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: %3.sub6:vreg_256 = V_MOV_B32_e32 6, implicit $exec + ; CHECK-NEXT: %3.sub7:vreg_256 = V_MOV_B32_e32 7, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_256 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_256 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_256 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_256 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_256 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %4.sub5:vreg_256 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: %4.sub6:vreg_256 = V_MOV_B32_e32 17, implicit $exec + ; CHECK-NEXT: %4.sub7:vreg_256 = V_MOV_B32_e32 18, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_256 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_256 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_256 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_256 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_256 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub5:vreg_256 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: %5.sub6:vreg_256 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: %5.sub7:vreg_256 = V_MOV_B32_e32 211, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_384 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_384 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_384 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_384 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_384 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_384 = V_MOV_B32_e32 05, implicit $exec + %0.sub6:vreg_384 = V_MOV_B32_e32 06, implicit $exec + %0.sub7:vreg_384 = V_MOV_B32_e32 07, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7 + + undef %1.sub1:vreg_384 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_384 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_384 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_384 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_384 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_384 = V_MOV_B32_e32 16, implicit $exec + %1.sub7:vreg_384 = V_MOV_B32_e32 17, implicit $exec + %1.sub8:vreg_384 = V_MOV_B32_e32 18, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8 + + undef %2.sub4:vreg_384 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_384 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_384 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_384 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_384 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_384 = V_MOV_B32_e32 29, implicit $exec + %2.sub10:vreg_384 = V_MOV_B32_e32 210, implicit $exec + %2.sub11:vreg_384 = V_MOV_B32_e32 211, implicit $exec + S_NOP 0, implicit %2.sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11 + +... 
+--- +name: test_vreg_512_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_512_w32 + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_]] + ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]] + ; CHECK-NEXT: [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 215, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_2]] + undef %0.sub0:vreg_512 = V_MOV_B32_e32 00, implicit $exec + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:vreg_512 = V_MOV_B32_e32 11, implicit $exec + S_NOP 0, implicit %1.sub1 + + undef %2.sub15:vreg_512 = V_MOV_B32_e32 215, implicit $exec + S_NOP 0, implicit %2.sub15 + +... +--- +name: test_vreg_512_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_512_w64 + ; CHECK: undef %3.sub0:vreg_64 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_64 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_64 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_64 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_64 = V_MOV_B32_e32 214, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_64 = V_MOV_B32_e32 215, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_512 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_512 = V_MOV_B32_e32 01, implicit $exec + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub1:vreg_512 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_512 = V_MOV_B32_e32 12, implicit $exec + S_NOP 0, implicit %1.sub1_sub2 + + undef %2.sub14:vreg_512 = V_MOV_B32_e32 214, implicit $exec + %2.sub15:vreg_512 = V_MOV_B32_e32 215, implicit $exec + S_NOP 0, implicit %2.sub14_sub15 + +... +--- +name: test_vreg_512_w96 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_512_w96 + ; CHECK: undef %3.sub0:vreg_96 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_96 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_96 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_96 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_96 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_96 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_96 = V_MOV_B32_e32 213, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_96 = V_MOV_B32_e32 214, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_96 = V_MOV_B32_e32 215, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_512 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_512 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_512 = V_MOV_B32_e32 02, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2 + + undef %1.sub1:vreg_512 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_512 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_512 = V_MOV_B32_e32 13, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3 + + undef %2.sub13:vreg_512 = V_MOV_B32_e32 213, implicit $exec + %2.sub14:vreg_512 = V_MOV_B32_e32 214, implicit $exec + %2.sub15:vreg_512 = V_MOV_B32_e32 215, implicit $exec + S_NOP 0, implicit %2.sub13_sub14_sub15 + +... 
+--- +name: test_vreg_512_w128 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_512_w128 + ; CHECK: undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_128 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_128 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_128 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_128 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_128 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_128 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_128 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_128 = V_MOV_B32_e32 212, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_128 = V_MOV_B32_e32 213, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_128 = V_MOV_B32_e32 214, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_128 = V_MOV_B32_e32 215, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_512 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_512 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_512 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_512 = V_MOV_B32_e32 03, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3 + + undef %1.sub1:vreg_512 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_512 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_512 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_512 = V_MOV_B32_e32 14, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4 + + undef %2.sub12:vreg_512 = V_MOV_B32_e32 212, implicit $exec + %2.sub13:vreg_512 = V_MOV_B32_e32 213, implicit $exec + %2.sub14:vreg_512 = V_MOV_B32_e32 214, implicit $exec + %2.sub15:vreg_512 = V_MOV_B32_e32 215, implicit $exec + S_NOP 0, implicit %2.sub12_sub13_sub14_sub15 + +... 
+--- +name: test_vreg_512_w160 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_512_w160 + ; CHECK: undef %3.sub0:vreg_160 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_160 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_160 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_160 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_160 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_160 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_160 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_160 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_160 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_160 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_160 = V_MOV_B32_e32 211, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_160 = V_MOV_B32_e32 212, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_160 = V_MOV_B32_e32 213, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_160 = V_MOV_B32_e32 214, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_160 = V_MOV_B32_e32 215, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_512 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_512 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_512 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_512 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_512 = V_MOV_B32_e32 04, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4 + + undef %1.sub1:vreg_512 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_512 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_512 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_512 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_512 = V_MOV_B32_e32 15, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5 + + undef %2.sub11:vreg_512 = V_MOV_B32_e32 211, implicit $exec + %2.sub12:vreg_512 = V_MOV_B32_e32 212, implicit $exec + %2.sub13:vreg_512 = V_MOV_B32_e32 213, implicit $exec + %2.sub14:vreg_512 = V_MOV_B32_e32 214, implicit $exec + %2.sub15:vreg_512 = V_MOV_B32_e32 215, implicit $exec + S_NOP 0, implicit %2.sub11_sub12_sub13_sub14_sub15 + +... 
+--- +name: test_vreg_512_w192 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_512_w192 + ; CHECK: undef %3.sub0:vreg_192 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_192 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_192 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_192 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_192 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_192 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_192 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_192 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_192 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_192 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_192 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %4.sub5:vreg_192 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_192 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_192 = V_MOV_B32_e32 211, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_192 = V_MOV_B32_e32 212, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_192 = V_MOV_B32_e32 213, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_192 = V_MOV_B32_e32 214, implicit $exec + ; CHECK-NEXT: %5.sub5:vreg_192 = V_MOV_B32_e32 215, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_512 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_512 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_512 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_512 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_512 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_512 = V_MOV_B32_e32 05, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5 + + undef %1.sub1:vreg_512 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_512 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_512 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_512 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_512 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_512 = V_MOV_B32_e32 16, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5_sub6 + + undef %2.sub10:vreg_512 = V_MOV_B32_e32 210, implicit $exec + %2.sub11:vreg_512 = V_MOV_B32_e32 211, implicit $exec + %2.sub12:vreg_512 = V_MOV_B32_e32 212, implicit $exec + %2.sub13:vreg_512 = V_MOV_B32_e32 213, implicit $exec + %2.sub14:vreg_512 = V_MOV_B32_e32 214, implicit $exec + %2.sub15:vreg_512 = V_MOV_B32_e32 215, implicit $exec + S_NOP 0, implicit %2.sub10_sub11_sub12_sub13_sub14_sub15 + +... 
+--- +name: test_vreg_512_w256 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_512_w256 + ; CHECK: undef %3.sub0:vreg_256 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_256 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_256 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_256 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_256 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_256 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: %3.sub6:vreg_256 = V_MOV_B32_e32 6, implicit $exec + ; CHECK-NEXT: %3.sub7:vreg_256 = V_MOV_B32_e32 7, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_256 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_256 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_256 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_256 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_256 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %4.sub5:vreg_256 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: %4.sub6:vreg_256 = V_MOV_B32_e32 17, implicit $exec + ; CHECK-NEXT: %4.sub7:vreg_256 = V_MOV_B32_e32 18, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_256 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_256 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_256 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_256 = V_MOV_B32_e32 211, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_256 = V_MOV_B32_e32 212, implicit $exec + ; CHECK-NEXT: %5.sub5:vreg_256 = V_MOV_B32_e32 213, implicit $exec + ; CHECK-NEXT: %5.sub6:vreg_256 = V_MOV_B32_e32 214, implicit $exec + ; CHECK-NEXT: %5.sub7:vreg_256 = V_MOV_B32_e32 215, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_512 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_512 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_512 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_512 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_512 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_512 = V_MOV_B32_e32 05, implicit $exec + %0.sub6:vreg_512 = V_MOV_B32_e32 06, implicit $exec + %0.sub7:vreg_512 = V_MOV_B32_e32 07, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7 + + undef %1.sub1:vreg_512 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_512 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_512 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_512 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_512 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_512 = V_MOV_B32_e32 16, implicit $exec + %1.sub7:vreg_512 = V_MOV_B32_e32 17, implicit $exec + %1.sub8:vreg_512 = V_MOV_B32_e32 18, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8 + + undef %2.sub8:vreg_512 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_512 = V_MOV_B32_e32 29, implicit $exec + %2.sub10:vreg_512 = V_MOV_B32_e32 210, implicit $exec + %2.sub11:vreg_512 = V_MOV_B32_e32 211, implicit $exec + %2.sub12:vreg_512 = V_MOV_B32_e32 212, implicit $exec + %2.sub13:vreg_512 = V_MOV_B32_e32 213, implicit $exec + %2.sub14:vreg_512 = V_MOV_B32_e32 214, implicit $exec + %2.sub15:vreg_512 = V_MOV_B32_e32 215, implicit $exec + S_NOP 0, implicit %2.sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15 + +... 
+--- +name: test_vreg_1024_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_1024_w32 + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_]] + ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]] + ; CHECK-NEXT: [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 231, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_2]] + undef %0.sub0:vreg_1024 = V_MOV_B32_e32 00, implicit $exec + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:vreg_1024 = V_MOV_B32_e32 11, implicit $exec + S_NOP 0, implicit %1.sub1 + + undef %2.sub31:vreg_1024 = V_MOV_B32_e32 231, implicit $exec + S_NOP 0, implicit %2.sub31 + +... +--- +name: test_vreg_1024_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_1024_w64 + ; CHECK: undef %3.sub0:vreg_64 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_64 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_64 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_64 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_64 = V_MOV_B32_e32 230, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_64 = V_MOV_B32_e32 231, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_1024 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_1024 = V_MOV_B32_e32 01, implicit $exec + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub1:vreg_1024 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_1024 = V_MOV_B32_e32 12, implicit $exec + S_NOP 0, implicit %1.sub1_sub2 + + undef %2.sub30:vreg_1024 = V_MOV_B32_e32 230, implicit $exec + %2.sub31:vreg_1024 = V_MOV_B32_e32 231, implicit $exec + S_NOP 0, implicit %2.sub30_sub31 + +... +--- +name: test_vreg_1024_w96 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_1024_w96 + ; CHECK: undef %3.sub0:vreg_96 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_96 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_96 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_96 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_96 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_96 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_96 = V_MOV_B32_e32 229, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_96 = V_MOV_B32_e32 230, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_96 = V_MOV_B32_e32 231, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_1024 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_1024 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_1024 = V_MOV_B32_e32 02, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2 + + undef %1.sub1:vreg_1024 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_1024 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_1024 = V_MOV_B32_e32 13, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3 + + undef %2.sub29:vreg_1024 = V_MOV_B32_e32 229, implicit $exec + %2.sub30:vreg_1024 = V_MOV_B32_e32 230, implicit $exec + %2.sub31:vreg_1024 = V_MOV_B32_e32 231, implicit $exec + S_NOP 0, implicit %2.sub29_sub30_sub31 + +... 
+--- +name: test_vreg_1024_w128 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_1024_w128 + ; CHECK: undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_128 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_128 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_128 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_128 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_128 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_128 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_128 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_128 = V_MOV_B32_e32 228, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_128 = V_MOV_B32_e32 229, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_128 = V_MOV_B32_e32 230, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_128 = V_MOV_B32_e32 231, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_1024 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_1024 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_1024 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_1024 = V_MOV_B32_e32 03, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3 + + undef %1.sub1:vreg_1024 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_1024 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_1024 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_1024 = V_MOV_B32_e32 14, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4 + + undef %2.sub28:vreg_1024 = V_MOV_B32_e32 228, implicit $exec + %2.sub29:vreg_1024 = V_MOV_B32_e32 229, implicit $exec + %2.sub30:vreg_1024 = V_MOV_B32_e32 230, implicit $exec + %2.sub31:vreg_1024 = V_MOV_B32_e32 231, implicit $exec + S_NOP 0, implicit %2.sub28_sub29_sub30_sub31 + +... 
+--- +name: test_vreg_1024_w160 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_1024_w160 + ; CHECK: undef %3.sub0:vreg_160 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_160 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_160 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_160 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_160 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_160 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_160 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_160 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_160 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_160 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_160 = V_MOV_B32_e32 227, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_160 = V_MOV_B32_e32 228, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_160 = V_MOV_B32_e32 229, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_160 = V_MOV_B32_e32 230, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_160 = V_MOV_B32_e32 231, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_1024 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_1024 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_1024 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_1024 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_1024 = V_MOV_B32_e32 04, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4 + + undef %1.sub1:vreg_1024 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_1024 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_1024 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_1024 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_1024 = V_MOV_B32_e32 15, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5 + + undef %2.sub27:vreg_1024 = V_MOV_B32_e32 227, implicit $exec + %2.sub28:vreg_1024 = V_MOV_B32_e32 228, implicit $exec + %2.sub29:vreg_1024 = V_MOV_B32_e32 229, implicit $exec + %2.sub30:vreg_1024 = V_MOV_B32_e32 230, implicit $exec + %2.sub31:vreg_1024 = V_MOV_B32_e32 231, implicit $exec + S_NOP 0, implicit %2.sub27_sub28_sub29_sub30_sub31 + +... 
+--- +name: test_vreg_1024_w192 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_1024_w192 + ; CHECK: undef %3.sub0:vreg_192 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_192 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_192 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_192 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_192 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_192 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_192 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_192 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_192 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_192 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_192 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %4.sub5:vreg_192 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_192 = V_MOV_B32_e32 226, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_192 = V_MOV_B32_e32 227, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_192 = V_MOV_B32_e32 228, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_192 = V_MOV_B32_e32 229, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_192 = V_MOV_B32_e32 230, implicit $exec + ; CHECK-NEXT: %5.sub5:vreg_192 = V_MOV_B32_e32 231, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_1024 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_1024 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_1024 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_1024 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_1024 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_1024 = V_MOV_B32_e32 05, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5 + + undef %1.sub1:vreg_1024 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_1024 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_1024 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_1024 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_1024 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_1024 = V_MOV_B32_e32 16, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5_sub6 + + undef %2.sub26:vreg_1024 = V_MOV_B32_e32 226, implicit $exec + %2.sub27:vreg_1024 = V_MOV_B32_e32 227, implicit $exec + %2.sub28:vreg_1024 = V_MOV_B32_e32 228, implicit $exec + %2.sub29:vreg_1024 = V_MOV_B32_e32 229, implicit $exec + %2.sub30:vreg_1024 = V_MOV_B32_e32 230, implicit $exec + %2.sub31:vreg_1024 = V_MOV_B32_e32 231, implicit $exec + S_NOP 0, implicit %2.sub26_sub27_sub28_sub29_sub30_sub31 + +... 
+--- +name: test_vreg_1024_w256 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_1024_w256 + ; CHECK: undef %3.sub0:vreg_256 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_256 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_256 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_256 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_256 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_256 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: %3.sub6:vreg_256 = V_MOV_B32_e32 6, implicit $exec + ; CHECK-NEXT: %3.sub7:vreg_256 = V_MOV_B32_e32 7, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_256 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_256 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_256 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_256 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_256 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %4.sub5:vreg_256 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: %4.sub6:vreg_256 = V_MOV_B32_e32 17, implicit $exec + ; CHECK-NEXT: %4.sub7:vreg_256 = V_MOV_B32_e32 18, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_256 = V_MOV_B32_e32 224, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_256 = V_MOV_B32_e32 225, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_256 = V_MOV_B32_e32 226, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_256 = V_MOV_B32_e32 227, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_256 = V_MOV_B32_e32 228, implicit $exec + ; CHECK-NEXT: %5.sub5:vreg_256 = V_MOV_B32_e32 229, implicit $exec + ; CHECK-NEXT: %5.sub6:vreg_256 = V_MOV_B32_e32 230, implicit $exec + ; CHECK-NEXT: %5.sub7:vreg_256 = V_MOV_B32_e32 231, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_1024 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_1024 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_1024 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_1024 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_1024 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_1024 = V_MOV_B32_e32 05, implicit $exec + %0.sub6:vreg_1024 = V_MOV_B32_e32 06, implicit $exec + %0.sub7:vreg_1024 = V_MOV_B32_e32 07, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7 + + undef %1.sub1:vreg_1024 = V_MOV_B32_e32 11, implicit $exec + %1.sub2:vreg_1024 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_1024 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_1024 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_1024 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_1024 = V_MOV_B32_e32 16, implicit $exec + %1.sub7:vreg_1024 = V_MOV_B32_e32 17, implicit $exec + %1.sub8:vreg_1024 = V_MOV_B32_e32 18, implicit $exec + S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8 + + undef %2.sub24:vreg_1024 = V_MOV_B32_e32 224, implicit $exec + %2.sub25:vreg_1024 = V_MOV_B32_e32 225, implicit $exec + %2.sub26:vreg_1024 = V_MOV_B32_e32 226, implicit $exec + %2.sub27:vreg_1024 = V_MOV_B32_e32 227, implicit $exec + %2.sub28:vreg_1024 = V_MOV_B32_e32 228, implicit $exec + %2.sub29:vreg_1024 = V_MOV_B32_e32 229, implicit $exec + %2.sub30:vreg_1024 = V_MOV_B32_e32 230, implicit $exec + %2.sub31:vreg_1024 = V_MOV_B32_e32 231, implicit $exec + S_NOP 0, implicit %2.sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31 + +... 
+---
+name: test_vreg_1024_w512
+tracksRegLiveness: true
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_vreg_1024_w512
+    ; CHECK: undef %3.sub0:vreg_512 = V_MOV_B32_e32 0, implicit $exec
+    ; CHECK-NEXT: %3.sub1:vreg_512 = V_MOV_B32_e32 1, implicit $exec
+    ; CHECK-NEXT: %3.sub2:vreg_512 = V_MOV_B32_e32 2, implicit $exec
+    ; CHECK-NEXT: %3.sub3:vreg_512 = V_MOV_B32_e32 3, implicit $exec
+    ; CHECK-NEXT: %3.sub4:vreg_512 = V_MOV_B32_e32 4, implicit $exec
+    ; CHECK-NEXT: %3.sub5:vreg_512 = V_MOV_B32_e32 5, implicit $exec
+    ; CHECK-NEXT: %3.sub6:vreg_512 = V_MOV_B32_e32 6, implicit $exec
+    ; CHECK-NEXT: %3.sub7:vreg_512 = V_MOV_B32_e32 7, implicit $exec
+    ; CHECK-NEXT: %3.sub8:vreg_512 = V_MOV_B32_e32 8, implicit $exec
+    ; CHECK-NEXT: %3.sub9:vreg_512 = V_MOV_B32_e32 9, implicit $exec
+    ; CHECK-NEXT: %3.sub10:vreg_512 = V_MOV_B32_e32 10, implicit $exec
+    ; CHECK-NEXT: %3.sub11:vreg_512 = V_MOV_B32_e32 11, implicit $exec
+    ; CHECK-NEXT: %3.sub12:vreg_512 = V_MOV_B32_e32 12, implicit $exec
+    ; CHECK-NEXT: %3.sub13:vreg_512 = V_MOV_B32_e32 13, implicit $exec
+    ; CHECK-NEXT: %3.sub14:vreg_512 = V_MOV_B32_e32 14, implicit $exec
+    ; CHECK-NEXT: %3.sub15:vreg_512 = V_MOV_B32_e32 15, implicit $exec
+    ; CHECK-NEXT: S_NOP 0, implicit %3
+    ; CHECK-NEXT: undef %4.sub0:vreg_512 = V_MOV_B32_e32 11, implicit $exec
+    ; CHECK-NEXT: %4.sub1:vreg_512 = V_MOV_B32_e32 12, implicit $exec
+    ; CHECK-NEXT: %4.sub2:vreg_512 = V_MOV_B32_e32 13, implicit $exec
+    ; CHECK-NEXT: %4.sub3:vreg_512 = V_MOV_B32_e32 14, implicit $exec
+    ; CHECK-NEXT: %4.sub4:vreg_512 = V_MOV_B32_e32 15, implicit $exec
+    ; CHECK-NEXT: %4.sub5:vreg_512 = V_MOV_B32_e32 16, implicit $exec
+    ; CHECK-NEXT: %4.sub6:vreg_512 = V_MOV_B32_e32 17, implicit $exec
+    ; CHECK-NEXT: %4.sub7:vreg_512 = V_MOV_B32_e32 18, implicit $exec
+    ; CHECK-NEXT: %4.sub8:vreg_512 = V_MOV_B32_e32 19, implicit $exec
+    ; CHECK-NEXT: %4.sub9:vreg_512 = V_MOV_B32_e32 110, implicit $exec
+    ; CHECK-NEXT: %4.sub10:vreg_512 = V_MOV_B32_e32 111, implicit $exec
+    ; CHECK-NEXT: %4.sub11:vreg_512 = V_MOV_B32_e32 112, implicit $exec
+    ; CHECK-NEXT: %4.sub12:vreg_512 = V_MOV_B32_e32 113, implicit $exec
+    ; CHECK-NEXT: %4.sub13:vreg_512 = V_MOV_B32_e32 114, implicit $exec
+    ; CHECK-NEXT: %4.sub14:vreg_512 = V_MOV_B32_e32 115, implicit $exec
+    ; CHECK-NEXT: %4.sub15:vreg_512 = V_MOV_B32_e32 116, implicit $exec
+    ; CHECK-NEXT: S_NOP 0, implicit %4
+    ; CHECK-NEXT: undef %5.sub0:vreg_512 = V_MOV_B32_e32 216, implicit $exec
+    ; CHECK-NEXT: %5.sub1:vreg_512 = V_MOV_B32_e32 217, implicit $exec
+    ; CHECK-NEXT: %5.sub2:vreg_512 = V_MOV_B32_e32 218, implicit $exec
+    ; CHECK-NEXT: %5.sub3:vreg_512 = V_MOV_B32_e32 219, implicit $exec
+    ; CHECK-NEXT: %5.sub4:vreg_512 = V_MOV_B32_e32 220, implicit $exec
+    ; CHECK-NEXT: %5.sub5:vreg_512 = V_MOV_B32_e32 221, implicit $exec
+    ; CHECK-NEXT: %5.sub6:vreg_512 = V_MOV_B32_e32 222, implicit $exec
+    ; CHECK-NEXT: %5.sub7:vreg_512 = V_MOV_B32_e32 223, implicit $exec
+    ; CHECK-NEXT: %5.sub8:vreg_512 = V_MOV_B32_e32 224, implicit $exec
+    ; CHECK-NEXT: %5.sub9:vreg_512 = V_MOV_B32_e32 225, implicit $exec
+    ; CHECK-NEXT: %5.sub10:vreg_512 = V_MOV_B32_e32 226, implicit $exec
+    ; CHECK-NEXT: %5.sub11:vreg_512 = V_MOV_B32_e32 227, implicit $exec
+    ; CHECK-NEXT: %5.sub12:vreg_512 = V_MOV_B32_e32 228, implicit $exec
+    ; CHECK-NEXT: %5.sub13:vreg_512 = V_MOV_B32_e32 229, implicit $exec
+    ; CHECK-NEXT: %5.sub14:vreg_512 = V_MOV_B32_e32 230, implicit $exec
+    ; CHECK-NEXT: %5.sub15:vreg_512 = V_MOV_B32_e32 231, implicit $exec
+    ; CHECK-NEXT: S_NOP 0, implicit %5
+    undef %0.sub0:vreg_1024 = V_MOV_B32_e32 00, implicit $exec
+    %0.sub1:vreg_1024 = V_MOV_B32_e32 01, implicit $exec
+    %0.sub2:vreg_1024 = V_MOV_B32_e32 02, implicit $exec
+    %0.sub3:vreg_1024 = V_MOV_B32_e32 03, implicit $exec
+    %0.sub4:vreg_1024 = V_MOV_B32_e32 04, implicit $exec
+    %0.sub5:vreg_1024 = V_MOV_B32_e32 05, implicit $exec
+    %0.sub6:vreg_1024 = V_MOV_B32_e32 06, implicit $exec
+    %0.sub7:vreg_1024 = V_MOV_B32_e32 07, implicit $exec
+    %0.sub8:vreg_1024 = V_MOV_B32_e32 08, implicit $exec
+    %0.sub9:vreg_1024 = V_MOV_B32_e32 09, implicit $exec
+    %0.sub10:vreg_1024 = V_MOV_B32_e32 010, implicit $exec
+    %0.sub11:vreg_1024 = V_MOV_B32_e32 011, implicit $exec
+    %0.sub12:vreg_1024 = V_MOV_B32_e32 012, implicit $exec
+    %0.sub13:vreg_1024 = V_MOV_B32_e32 013, implicit $exec
+    %0.sub14:vreg_1024 = V_MOV_B32_e32 014, implicit $exec
+    %0.sub15:vreg_1024 = V_MOV_B32_e32 015, implicit $exec
+    S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15
+
+    undef %1.sub1:vreg_1024 = V_MOV_B32_e32 11, implicit $exec
+    %1.sub2:vreg_1024 = V_MOV_B32_e32 12, implicit $exec
+    %1.sub3:vreg_1024 = V_MOV_B32_e32 13, implicit $exec
+    %1.sub4:vreg_1024 = V_MOV_B32_e32 14, implicit $exec
+    %1.sub5:vreg_1024 = V_MOV_B32_e32 15, implicit $exec
+    %1.sub6:vreg_1024 = V_MOV_B32_e32 16, implicit $exec
+    %1.sub7:vreg_1024 = V_MOV_B32_e32 17, implicit $exec
+    %1.sub8:vreg_1024 = V_MOV_B32_e32 18, implicit $exec
+    %1.sub9:vreg_1024 = V_MOV_B32_e32 19, implicit $exec
+    %1.sub10:vreg_1024 = V_MOV_B32_e32 110, implicit $exec
+    %1.sub11:vreg_1024 = V_MOV_B32_e32 111, implicit $exec
+    %1.sub12:vreg_1024 = V_MOV_B32_e32 112, implicit $exec
+    %1.sub13:vreg_1024 = V_MOV_B32_e32 113, implicit $exec
+    %1.sub14:vreg_1024 = V_MOV_B32_e32 114, implicit $exec
+    %1.sub15:vreg_1024 = V_MOV_B32_e32 115, implicit $exec
+    %1.sub16:vreg_1024 = V_MOV_B32_e32 116, implicit $exec
+    S_NOP 0, implicit %1.sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15_sub16
+
+    undef %2.sub16:vreg_1024 = V_MOV_B32_e32 216, implicit $exec
+    %2.sub17:vreg_1024 = V_MOV_B32_e32 217, implicit $exec
+    %2.sub18:vreg_1024 = V_MOV_B32_e32 218, implicit $exec
+    %2.sub19:vreg_1024 = V_MOV_B32_e32 219, implicit $exec
+    %2.sub20:vreg_1024 = V_MOV_B32_e32 220, implicit $exec
+    %2.sub21:vreg_1024 = V_MOV_B32_e32 221, implicit $exec
+    %2.sub22:vreg_1024 = V_MOV_B32_e32 222, implicit $exec
+    %2.sub23:vreg_1024 = V_MOV_B32_e32 223, implicit $exec
+    %2.sub24:vreg_1024 = V_MOV_B32_e32 224, implicit $exec
+    %2.sub25:vreg_1024 = V_MOV_B32_e32 225, implicit $exec
+    %2.sub26:vreg_1024 = V_MOV_B32_e32 226, implicit $exec
+    %2.sub27:vreg_1024 = V_MOV_B32_e32 227, implicit $exec
+    %2.sub28:vreg_1024 = V_MOV_B32_e32 228, implicit $exec
+    %2.sub29:vreg_1024 = V_MOV_B32_e32 229, implicit $exec
+    %2.sub30:vreg_1024 = V_MOV_B32_e32 230, implicit $exec
+    %2.sub31:vreg_1024 = V_MOV_B32_e32 231, implicit $exec
+    S_NOP 0, implicit %2.sub16_sub17_sub18_sub19_sub20_sub21_sub22_sub23_sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31
+
+...
+--- +name: test_subregs_composition_vreg_1024_align2 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_subregs_composition_vreg_1024_align2 + ; CHECK: undef %4.sub0:vreg_160_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_160_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4.sub0_sub1_sub2 + ; CHECK-NEXT: S_NOP 0, implicit %4.sub2_sub3_sub4 + ; CHECK-NEXT: undef %5.sub0:vreg_192_align2 = V_MOV_B32_e32 22, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_192_align2 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5.sub0_sub1_sub2_sub3 + ; CHECK-NEXT: S_NOP 0, implicit %5.sub2_sub3_sub4_sub5 + ; CHECK-NEXT: undef %6.sub0:vreg_224_align2 = V_MOV_B32_e32 32, implicit $exec + ; CHECK-NEXT: %6.sub2:vreg_224_align2 = V_MOV_B32_e32 34, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %6.sub0_sub1_sub2_sub3_sub4 + ; CHECK-NEXT: S_NOP 0, implicit %6.sub2_sub3_sub4_sub5_sub6 + ; CHECK-NEXT: undef %7.sub0:vreg_256_align2 = V_MOV_B32_e32 42, implicit $exec + ; CHECK-NEXT: %7.sub2:vreg_256_align2 = V_MOV_B32_e32 44, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %7.sub0_sub1_sub2_sub3_sub4_sub5 + ; CHECK-NEXT: S_NOP 0, implicit %7.sub2_sub3_sub4_sub5_sub6_sub7 + undef %1.sub2:vreg_1024_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub4:vreg_1024_align2 = V_MOV_B32_e32 14, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4 + S_NOP 0, implicit %1.sub4_sub5_sub6 + + undef %2.sub2:vreg_1024_align2 = V_MOV_B32_e32 22, implicit $exec + %2.sub4:vreg_1024_align2 = V_MOV_B32_e32 24, implicit $exec + S_NOP 0, implicit %2.sub2_sub3_sub4_sub5 + S_NOP 0, implicit %2.sub4_sub5_sub6_sub7 + + undef %3.sub2:vreg_1024_align2 = V_MOV_B32_e32 32, implicit $exec + %3.sub4:vreg_1024_align2 = V_MOV_B32_e32 34, implicit $exec + S_NOP 0, implicit %3.sub2_sub3_sub4_sub5_sub6 + S_NOP 0, implicit %3.sub4_sub5_sub6_sub7_sub8 + + undef %4.sub2:vreg_1024_align2 = V_MOV_B32_e32 42, implicit $exec + %4.sub4:vreg_1024_align2 = V_MOV_B32_e32 44, implicit $exec + S_NOP 0, implicit %4.sub2_sub3_sub4_sub5_sub6_sub7 + S_NOP 0, implicit %4.sub4_sub5_sub6_sub7_sub8_sub9 + +... +# Skip test_vreg_64_align2_w32 +--- +name: test_vreg_96_align2_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_96_align2_w32 + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_]] + ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]] + ; CHECK-NEXT: [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 22, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_2]] + undef %0.sub0:vreg_96_align2 = V_MOV_B32_e32 00, implicit $exec + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:vreg_96_align2 = V_MOV_B32_e32 11, implicit $exec + S_NOP 0, implicit %1.sub1 + + undef %2.sub2:vreg_96_align2 = V_MOV_B32_e32 22, implicit $exec + S_NOP 0, implicit %2.sub2 + +... 
+# Skip test_vreg_96_align2_w64 +# Skip test_vreg_128_align2_w32 +--- +name: test_vreg_128_align2_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_128_align2_w64 + ; CHECK: undef %2.sub0:vreg_64_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %2.sub1:vreg_64_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %2 + ; CHECK-NEXT: undef %3.sub0:vreg_64_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_64_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + undef %0.sub0:vreg_128_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_128_align2 = V_MOV_B32_e32 01, implicit $exec + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub2:vreg_128_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_128_align2 = V_MOV_B32_e32 13, implicit $exec + S_NOP 0, implicit %1.sub2_sub3 + +... +# Skip test_vreg_128_align2_w96 +--- +name: test_vreg_160_align2_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_160_align2_w32 + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_]] + ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]] + ; CHECK-NEXT: [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_2]] + undef %0.sub0:vreg_160_align2 = V_MOV_B32_e32 00, implicit $exec + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:vreg_160_align2 = V_MOV_B32_e32 11, implicit $exec + S_NOP 0, implicit %1.sub1 + + undef %2.sub4:vreg_160_align2 = V_MOV_B32_e32 24, implicit $exec + S_NOP 0, implicit %2.sub4 + +... +# Skip test_vreg_160_align2_w64 +--- +name: test_vreg_160_align2_w96 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_160_align2_w96 + ; CHECK: undef %2.sub0:vreg_96_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %2.sub1:vreg_96_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %2.sub2:vreg_96_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %2 + ; CHECK-NEXT: undef %3.sub0:vreg_96_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_96_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_96_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + undef %0.sub0:vreg_160_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_160_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_160_align2 = V_MOV_B32_e32 02, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2 + + undef %1.sub2:vreg_160_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_160_align2 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_160_align2 = V_MOV_B32_e32 14, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4 + +... 
+# Skip test_vreg_160_align2_w128 +# Skip test_vreg_192_align2_w32 +--- +name: test_vreg_192_align2_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_192_align2_w64 + ; CHECK: undef %3.sub0:vreg_64_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_64_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_64_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_64_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_64_align2 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_64_align2 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_192_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_192_align2 = V_MOV_B32_e32 01, implicit $exec + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub2:vreg_192_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_192_align2 = V_MOV_B32_e32 13, implicit $exec + S_NOP 0, implicit %1.sub2_sub3 + + undef %2.sub4:vreg_192_align2 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_192_align2 = V_MOV_B32_e32 25, implicit $exec + S_NOP 0, implicit %2.sub4_sub5 + +... +# Skip test_vreg_192_align2_w96 +--- +name: test_vreg_192_align2_w128 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_192_align2_w128 + ; CHECK: undef %2.sub0:vreg_128_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %2.sub1:vreg_128_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %2.sub2:vreg_128_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %2.sub3:vreg_128_align2 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %2 + ; CHECK-NEXT: undef %3.sub0:vreg_128_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_128_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_128_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_128_align2 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + undef %0.sub0:vreg_192_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_192_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_192_align2 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_192_align2 = V_MOV_B32_e32 03, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3 + + undef %1.sub2:vreg_192_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_192_align2 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_192_align2 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_192_align2 = V_MOV_B32_e32 15, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4_sub5 + +... 
+# Skip test_vreg_192_align2_w160 +--- +name: test_vreg_224_align2_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_224_align2_w32 + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_]] + ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]] + ; CHECK-NEXT: [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_2]] + undef %0.sub0:vreg_224_align2 = V_MOV_B32_e32 00, implicit $exec + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:vreg_224_align2 = V_MOV_B32_e32 11, implicit $exec + S_NOP 0, implicit %1.sub1 + + undef %2.sub6:vreg_224_align2 = V_MOV_B32_e32 26, implicit $exec + S_NOP 0, implicit %2.sub6 + +... +# Skip test_vreg_224_align2_w64 +--- +name: test_vreg_224_align2_w96 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_224_align2_w96 + ; CHECK: undef %3.sub0:vreg_96_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_96_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_96_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_96_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_96_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_96_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_96_align2 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_96_align2 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_96_align2 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_224_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_224_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_224_align2 = V_MOV_B32_e32 02, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2 + + undef %1.sub2:vreg_224_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_224_align2 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_224_align2 = V_MOV_B32_e32 14, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4 + + undef %2.sub4:vreg_224_align2 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_224_align2 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_224_align2 = V_MOV_B32_e32 26, implicit $exec + S_NOP 0, implicit %2.sub4_sub5_sub6 + +... 
+# Skip test_vreg_224_align2_w128 +--- +name: test_vreg_224_align2_w160 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_224_align2_w160 + ; CHECK: undef %2.sub0:vreg_160_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %2.sub1:vreg_160_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %2.sub2:vreg_160_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %2.sub3:vreg_160_align2 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %2.sub4:vreg_160_align2 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %2 + ; CHECK-NEXT: undef %3.sub0:vreg_160_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_160_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_160_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_160_align2 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_160_align2 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + undef %0.sub0:vreg_224_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_224_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_224_align2 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_224_align2 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_224_align2 = V_MOV_B32_e32 04, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4 + + undef %1.sub2:vreg_224_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_224_align2 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_224_align2 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_224_align2 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_224_align2 = V_MOV_B32_e32 16, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4_sub5_sub6 + +... +# Skip test_vreg_224_align2_w192 +# Skip test_vreg_256_align2_w32 +--- +name: test_vreg_256_align2_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_256_align2_w64 + ; CHECK: undef %3.sub0:vreg_64_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_64_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_64_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_64_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_64_align2 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_64_align2 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_256_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_256_align2 = V_MOV_B32_e32 01, implicit $exec + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub2:vreg_256_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_256_align2 = V_MOV_B32_e32 13, implicit $exec + S_NOP 0, implicit %1.sub2_sub3 + + undef %2.sub6:vreg_256_align2 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_256_align2 = V_MOV_B32_e32 27, implicit $exec + S_NOP 0, implicit %2.sub6_sub7 + +... 
+# Skip test_vreg_256_align2_w96 +--- +name: test_vreg_256_align2_w128 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_256_align2_w128 + ; CHECK: undef %3.sub0:vreg_128_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_128_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_128_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_128_align2 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_128_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_128_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_128_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_128_align2 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_128_align2 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_128_align2 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_128_align2 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_128_align2 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_256_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_256_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_256_align2 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_256_align2 = V_MOV_B32_e32 03, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3 + + undef %1.sub2:vreg_256_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_256_align2 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_256_align2 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_256_align2 = V_MOV_B32_e32 15, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4_sub5 + + undef %2.sub4:vreg_256_align2 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_256_align2 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_256_align2 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_256_align2 = V_MOV_B32_e32 27, implicit $exec + S_NOP 0, implicit %2.sub4_sub5_sub6_sub7 + +... 
+# Skip test_vreg_256_align2_w160 +--- +name: test_vreg_256_align2_w192 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_256_align2_w192 + ; CHECK: undef %2.sub0:vreg_192_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %2.sub1:vreg_192_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %2.sub2:vreg_192_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %2.sub3:vreg_192_align2 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %2.sub4:vreg_192_align2 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %2.sub5:vreg_192_align2 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %2 + ; CHECK-NEXT: undef %3.sub0:vreg_192_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_192_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_192_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_192_align2 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_192_align2 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_192_align2 = V_MOV_B32_e32 17, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + undef %0.sub0:vreg_256_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_256_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_256_align2 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_256_align2 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_256_align2 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_256_align2 = V_MOV_B32_e32 05, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5 + + undef %1.sub2:vreg_256_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_256_align2 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_256_align2 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_256_align2 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_256_align2 = V_MOV_B32_e32 16, implicit $exec + %1.sub7:vreg_256_align2 = V_MOV_B32_e32 17, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4_sub5_sub6_sub7 + +... +--- +name: test_vreg_288_align2_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_288_align2_w32 + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_]] + ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]] + ; CHECK-NEXT: [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_2]] + undef %0.sub0:vreg_288_align2 = V_MOV_B32_e32 00, implicit $exec + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:vreg_288_align2 = V_MOV_B32_e32 11, implicit $exec + S_NOP 0, implicit %1.sub1 + + undef %2.sub8:vreg_288_align2 = V_MOV_B32_e32 28, implicit $exec + S_NOP 0, implicit %2.sub8 + +... 
+# Skip test_vreg_288_align2_w64 +--- +name: test_vreg_288_align2_w96 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_288_align2_w96 + ; CHECK: undef %3.sub0:vreg_96_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_96_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_96_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_96_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_96_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_96_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_96_align2 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_96_align2 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_96_align2 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_288_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_288_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_288_align2 = V_MOV_B32_e32 02, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2 + + undef %1.sub2:vreg_288_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_288_align2 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_288_align2 = V_MOV_B32_e32 14, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4 + + undef %2.sub6:vreg_288_align2 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_288_align2 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_288_align2 = V_MOV_B32_e32 28, implicit $exec + S_NOP 0, implicit %2.sub6_sub7_sub8 + +... +# Skip test_vreg_288_align2_w128 +--- +name: test_vreg_288_align2_w160 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_288_align2_w160 + ; CHECK: undef %3.sub0:vreg_160_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_160_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_160_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_160_align2 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_160_align2 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_160_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_160_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_160_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_160_align2 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_160_align2 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_160_align2 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_160_align2 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_160_align2 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_160_align2 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_160_align2 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_288_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_288_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_288_align2 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_288_align2 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_288_align2 = V_MOV_B32_e32 04, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4 + + undef %1.sub2:vreg_288_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_288_align2 = V_MOV_B32_e32 13, implicit $exec + 
%1.sub4:vreg_288_align2 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_288_align2 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_288_align2 = V_MOV_B32_e32 16, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4_sub5_sub6 + + undef %2.sub4:vreg_288_align2 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_288_align2 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_288_align2 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_288_align2 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_288_align2 = V_MOV_B32_e32 28, implicit $exec + S_NOP 0, implicit %2.sub4_sub5_sub6_sub7_sub8 + +... +# Skip test_vreg_288_align2_w192 +# Skip test_vreg_288_align2_w256 +# Skip test_vreg_320_align2_w32 +--- +name: test_vreg_320_align2_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_320_align2_w64 + ; CHECK: undef %3.sub0:vreg_64_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_64_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_64_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_64_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_64_align2 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_64_align2 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_320_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_320_align2 = V_MOV_B32_e32 01, implicit $exec + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub2:vreg_320_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_320_align2 = V_MOV_B32_e32 13, implicit $exec + S_NOP 0, implicit %1.sub2_sub3 + + undef %2.sub8:vreg_320_align2 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_320_align2 = V_MOV_B32_e32 29, implicit $exec + S_NOP 0, implicit %2.sub8_sub9 + +... 
+# Skip test_vreg_320_align2_w96 +--- +name: test_vreg_320_align2_w128 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_320_align2_w128 + ; CHECK: undef %3.sub0:vreg_128_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_128_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_128_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_128_align2 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_128_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_128_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_128_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_128_align2 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_128_align2 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_128_align2 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_128_align2 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_128_align2 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_320_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_320_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_320_align2 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_320_align2 = V_MOV_B32_e32 03, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3 + + undef %1.sub2:vreg_320_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_320_align2 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_320_align2 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_320_align2 = V_MOV_B32_e32 15, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4_sub5 + + undef %2.sub6:vreg_320_align2 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_320_align2 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_320_align2 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_320_align2 = V_MOV_B32_e32 29, implicit $exec + S_NOP 0, implicit %2.sub6_sub7_sub8_sub9 + +... 
+# Skip test_vreg_320_align2_w160 +--- +name: test_vreg_320_align2_w192 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_320_align2_w192 + ; CHECK: undef %3.sub0:vreg_192_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_192_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_192_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_192_align2 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_192_align2 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_192_align2 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_192_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_192_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_192_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_192_align2 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_192_align2 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: %4.sub5:vreg_192_align2 = V_MOV_B32_e32 17, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_192_align2 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_192_align2 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_192_align2 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_192_align2 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_192_align2 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub5:vreg_192_align2 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_320_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_320_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_320_align2 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_320_align2 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_320_align2 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_320_align2 = V_MOV_B32_e32 05, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5 + + undef %1.sub2:vreg_320_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_320_align2 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_320_align2 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_320_align2 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_320_align2 = V_MOV_B32_e32 16, implicit $exec + %1.sub7:vreg_320_align2 = V_MOV_B32_e32 17, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4_sub5_sub6_sub7 + + undef %2.sub4:vreg_320_align2 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_320_align2 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_320_align2 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_320_align2 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_320_align2 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_320_align2 = V_MOV_B32_e32 29, implicit $exec + S_NOP 0, implicit %2.sub4_sub5_sub6_sub7_sub8_sub9 + +... 
+--- +name: test_vreg_320_align2_w256 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_320_align2_w256 + ; CHECK: undef %2.sub0:vreg_256_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %2.sub1:vreg_256_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %2.sub2:vreg_256_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %2.sub3:vreg_256_align2 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %2.sub4:vreg_256_align2 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %2.sub5:vreg_256_align2 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: %2.sub6:vreg_256_align2 = V_MOV_B32_e32 6, implicit $exec + ; CHECK-NEXT: %2.sub7:vreg_256_align2 = V_MOV_B32_e32 7, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %2 + ; CHECK-NEXT: undef %3.sub0:vreg_256_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_256_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_256_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_256_align2 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_256_align2 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_256_align2 = V_MOV_B32_e32 17, implicit $exec + ; CHECK-NEXT: %3.sub6:vreg_256_align2 = V_MOV_B32_e32 18, implicit $exec + ; CHECK-NEXT: %3.sub7:vreg_256_align2 = V_MOV_B32_e32 19, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + undef %0.sub0:vreg_320_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_320_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_320_align2 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_320_align2 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_320_align2 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_320_align2 = V_MOV_B32_e32 05, implicit $exec + %0.sub6:vreg_320_align2 = V_MOV_B32_e32 06, implicit $exec + %0.sub7:vreg_320_align2 = V_MOV_B32_e32 07, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7 + + undef %1.sub2:vreg_320_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_320_align2 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_320_align2 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_320_align2 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_320_align2 = V_MOV_B32_e32 16, implicit $exec + %1.sub7:vreg_320_align2 = V_MOV_B32_e32 17, implicit $exec + %1.sub8:vreg_320_align2 = V_MOV_B32_e32 18, implicit $exec + %1.sub9:vreg_320_align2 = V_MOV_B32_e32 19, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9 + +... +--- +name: test_vreg_352_align2_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_352_align2_w32 + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_]] + ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]] + ; CHECK-NEXT: [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_2]] + undef %0.sub0:vreg_352_align2 = V_MOV_B32_e32 00, implicit $exec + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:vreg_352_align2 = V_MOV_B32_e32 11, implicit $exec + S_NOP 0, implicit %1.sub1 + + undef %2.sub10:vreg_352_align2 = V_MOV_B32_e32 210, implicit $exec + S_NOP 0, implicit %2.sub10 + +... 
+# Skip test_vreg_352_align2_w64 +--- +name: test_vreg_352_align2_w96 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_352_align2_w96 + ; CHECK: undef %3.sub0:vreg_96_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_96_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_96_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_96_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_96_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_96_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_96_align2 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_96_align2 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_96_align2 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_352_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_352_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_352_align2 = V_MOV_B32_e32 02, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2 + + undef %1.sub2:vreg_352_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_352_align2 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_352_align2 = V_MOV_B32_e32 14, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4 + + undef %2.sub8:vreg_352_align2 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_352_align2 = V_MOV_B32_e32 29, implicit $exec + %2.sub10:vreg_352_align2 = V_MOV_B32_e32 210, implicit $exec + S_NOP 0, implicit %2.sub8_sub9_sub10 + +... +# Skip test_vreg_352_align2_w128 +--- +name: test_vreg_352_align2_w160 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_352_align2_w160 + ; CHECK: undef %3.sub0:vreg_160_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_160_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_160_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_160_align2 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_160_align2 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_160_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_160_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_160_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_160_align2 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_160_align2 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_160_align2 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_160_align2 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_160_align2 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_160_align2 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_160_align2 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_352_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_352_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_352_align2 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_352_align2 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_352_align2 = V_MOV_B32_e32 04, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4 + + undef %1.sub2:vreg_352_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_352_align2 = V_MOV_B32_e32 13, implicit $exec + 
%1.sub4:vreg_352_align2 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_352_align2 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_352_align2 = V_MOV_B32_e32 16, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4_sub5_sub6 + + undef %2.sub6:vreg_352_align2 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_352_align2 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_352_align2 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_352_align2 = V_MOV_B32_e32 29, implicit $exec + %2.sub10:vreg_352_align2 = V_MOV_B32_e32 210, implicit $exec + S_NOP 0, implicit %2.sub6_sub7_sub8_sub9_sub10 + +... +# Skip test_vreg_352_align2_w192 +# Skip test_vreg_352_align2_w256 +# Skip test_vreg_384_align2_w32 +--- +name: test_vreg_384_align2_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_384_align2_w64 + ; CHECK: undef %3.sub0:vreg_64_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_64_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_64_align2 = V_MOV_B32_e32 110, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_64_align2 = V_MOV_B32_e32 111, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_64_align2 = V_MOV_B32_e32 22, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_64_align2 = V_MOV_B32_e32 23, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_384_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_384_align2 = V_MOV_B32_e32 01, implicit $exec + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub10:vreg_384_align2 = V_MOV_B32_e32 110, implicit $exec + %1.sub11:vreg_384_align2 = V_MOV_B32_e32 111, implicit $exec + S_NOP 0, implicit %1.sub10_sub11 + + undef %2.sub2:vreg_384_align2 = V_MOV_B32_e32 22, implicit $exec + %2.sub3:vreg_384_align2 = V_MOV_B32_e32 23, implicit $exec + S_NOP 0, implicit %2.sub2_sub3 + +... 
+# Skip test_vreg_384_align2_w96 +--- +name: test_vreg_384_align2_w128 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_384_align2_w128 + ; CHECK: undef %3.sub0:vreg_128_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_128_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_128_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_128_align2 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_128_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_128_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_128_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_128_align2 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_128_align2 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_128_align2 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_128_align2 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_128_align2 = V_MOV_B32_e32 211, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_384_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_384_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_384_align2 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_384_align2 = V_MOV_B32_e32 03, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3 + + undef %1.sub2:vreg_384_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_384_align2 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_384_align2 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_384_align2 = V_MOV_B32_e32 15, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4_sub5 + + undef %2.sub8:vreg_384_align2 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_384_align2 = V_MOV_B32_e32 29, implicit $exec + %2.sub10:vreg_384_align2 = V_MOV_B32_e32 210, implicit $exec + %2.sub11:vreg_384_align2 = V_MOV_B32_e32 211, implicit $exec + S_NOP 0, implicit %2.sub8_sub9_sub10_sub11 + +... 
+# Skip test_vreg_384_align2_w160 +--- +name: test_vreg_384_align2_w192 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_384_align2_w192 + ; CHECK: undef %3.sub0:vreg_192_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_192_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_192_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_192_align2 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_192_align2 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_192_align2 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_192_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_192_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_192_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_192_align2 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_192_align2 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: %4.sub5:vreg_192_align2 = V_MOV_B32_e32 17, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_192_align2 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_192_align2 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_192_align2 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_192_align2 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_192_align2 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: %5.sub5:vreg_192_align2 = V_MOV_B32_e32 211, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_384_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_384_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_384_align2 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_384_align2 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_384_align2 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_384_align2 = V_MOV_B32_e32 05, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5 + + undef %1.sub2:vreg_384_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_384_align2 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_384_align2 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_384_align2 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_384_align2 = V_MOV_B32_e32 16, implicit $exec + %1.sub7:vreg_384_align2 = V_MOV_B32_e32 17, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4_sub5_sub6_sub7 + + undef %2.sub6:vreg_384_align2 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_384_align2 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_384_align2 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_384_align2 = V_MOV_B32_e32 29, implicit $exec + %2.sub10:vreg_384_align2 = V_MOV_B32_e32 210, implicit $exec + %2.sub11:vreg_384_align2 = V_MOV_B32_e32 211, implicit $exec + S_NOP 0, implicit %2.sub6_sub7_sub8_sub9_sub10_sub11 + +... 
+--- +name: test_vreg_384_align2_w256 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_384_align2_w256 + ; CHECK: undef %3.sub0:vreg_256_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_256_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_256_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_256_align2 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_256_align2 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_256_align2 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: %3.sub6:vreg_256_align2 = V_MOV_B32_e32 6, implicit $exec + ; CHECK-NEXT: %3.sub7:vreg_256_align2 = V_MOV_B32_e32 7, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_256_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_256_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_256_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_256_align2 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_256_align2 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: %4.sub5:vreg_256_align2 = V_MOV_B32_e32 17, implicit $exec + ; CHECK-NEXT: %4.sub6:vreg_256_align2 = V_MOV_B32_e32 18, implicit $exec + ; CHECK-NEXT: %4.sub7:vreg_256_align2 = V_MOV_B32_e32 19, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_256_align2 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_256_align2 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_256_align2 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_256_align2 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_256_align2 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub5:vreg_256_align2 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: %5.sub6:vreg_256_align2 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: %5.sub7:vreg_256_align2 = V_MOV_B32_e32 211, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_384_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_384_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_384_align2 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_384_align2 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_384_align2 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_384_align2 = V_MOV_B32_e32 05, implicit $exec + %0.sub6:vreg_384_align2 = V_MOV_B32_e32 06, implicit $exec + %0.sub7:vreg_384_align2 = V_MOV_B32_e32 07, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7 + + undef %1.sub2:vreg_384_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_384_align2 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_384_align2 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_384_align2 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_384_align2 = V_MOV_B32_e32 16, implicit $exec + %1.sub7:vreg_384_align2 = V_MOV_B32_e32 17, implicit $exec + %1.sub8:vreg_384_align2 = V_MOV_B32_e32 18, implicit $exec + %1.sub9:vreg_384_align2 = V_MOV_B32_e32 19, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9 + + undef %2.sub4:vreg_384_align2 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_384_align2 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_384_align2 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_384_align2 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_384_align2 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_384_align2 = V_MOV_B32_e32 29, implicit $exec + 
%2.sub10:vreg_384_align2 = V_MOV_B32_e32 210, implicit $exec + %2.sub11:vreg_384_align2 = V_MOV_B32_e32 211, implicit $exec + S_NOP 0, implicit %2.sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11 + +... +# Skip test_vreg_512_align2_w32 +--- +name: test_vreg_512_align2_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_512_align2_w64 + ; CHECK: undef %3.sub0:vreg_64_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_64_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_64_align2 = V_MOV_B32_e32 114, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_64_align2 = V_MOV_B32_e32 115, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_64_align2 = V_MOV_B32_e32 22, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_64_align2 = V_MOV_B32_e32 23, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_512_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_512_align2 = V_MOV_B32_e32 01, implicit $exec + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub14:vreg_512_align2 = V_MOV_B32_e32 114, implicit $exec + %1.sub15:vreg_512_align2 = V_MOV_B32_e32 115, implicit $exec + S_NOP 0, implicit %1.sub14_sub15 + + undef %2.sub2:vreg_512_align2 = V_MOV_B32_e32 22, implicit $exec + %2.sub3:vreg_512_align2 = V_MOV_B32_e32 23, implicit $exec + S_NOP 0, implicit %2.sub2_sub3 + +... +# Skip test_vreg_512_align2_w96 +--- +name: test_vreg_512_align2_w128 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_512_align2_w128 + ; CHECK: undef %3.sub0:vreg_128_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_128_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_128_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_128_align2 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_128_align2 = V_MOV_B32_e32 112, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_128_align2 = V_MOV_B32_e32 113, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_128_align2 = V_MOV_B32_e32 114, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_128_align2 = V_MOV_B32_e32 115, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_128_align2 = V_MOV_B32_e32 22, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_128_align2 = V_MOV_B32_e32 23, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_128_align2 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_128_align2 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_512_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_512_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_512_align2 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_512_align2 = V_MOV_B32_e32 03, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3 + + undef %1.sub12:vreg_512_align2 = V_MOV_B32_e32 112, implicit $exec + %1.sub13:vreg_512_align2 = V_MOV_B32_e32 113, implicit $exec + %1.sub14:vreg_512_align2 = V_MOV_B32_e32 114, implicit $exec + %1.sub15:vreg_512_align2 = V_MOV_B32_e32 115, implicit $exec + S_NOP 0, implicit %1.sub12_sub13_sub14_sub15 + + undef %2.sub2:vreg_512_align2 = V_MOV_B32_e32 22, implicit $exec + %2.sub3:vreg_512_align2 = V_MOV_B32_e32 23, implicit $exec + %2.sub4:vreg_512_align2 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_512_align2 = V_MOV_B32_e32 25, implicit $exec + S_NOP 0, implicit %2.sub2_sub3_sub4_sub5 + +... 
+# Skip test_vreg_512_align2_w160 +--- +name: test_vreg_512_align2_w192 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_512_align2_w192 + ; CHECK: undef %3.sub0:vreg_192_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_192_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_192_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_192_align2 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_192_align2 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_192_align2 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_192_align2 = V_MOV_B32_e32 110, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_192_align2 = V_MOV_B32_e32 111, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_192_align2 = V_MOV_B32_e32 112, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_192_align2 = V_MOV_B32_e32 113, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_192_align2 = V_MOV_B32_e32 114, implicit $exec + ; CHECK-NEXT: %4.sub5:vreg_192_align2 = V_MOV_B32_e32 115, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_192_align2 = V_MOV_B32_e32 22, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_192_align2 = V_MOV_B32_e32 23, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_192_align2 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_192_align2 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_192_align2 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub5:vreg_192_align2 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_512_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_512_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_512_align2 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_512_align2 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_512_align2 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_512_align2 = V_MOV_B32_e32 05, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5 + + undef %1.sub10:vreg_512_align2 = V_MOV_B32_e32 110, implicit $exec + %1.sub11:vreg_512_align2 = V_MOV_B32_e32 111, implicit $exec + %1.sub12:vreg_512_align2 = V_MOV_B32_e32 112, implicit $exec + %1.sub13:vreg_512_align2 = V_MOV_B32_e32 113, implicit $exec + %1.sub14:vreg_512_align2 = V_MOV_B32_e32 114, implicit $exec + %1.sub15:vreg_512_align2 = V_MOV_B32_e32 115, implicit $exec + S_NOP 0, implicit %1.sub10_sub11_sub12_sub13_sub14_sub15 + + undef %2.sub2:vreg_512_align2 = V_MOV_B32_e32 22, implicit $exec + %2.sub3:vreg_512_align2 = V_MOV_B32_e32 23, implicit $exec + %2.sub4:vreg_512_align2 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_512_align2 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_512_align2 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_512_align2 = V_MOV_B32_e32 27, implicit $exec + S_NOP 0, implicit %2.sub2_sub3_sub4_sub5_sub6_sub7 + +... 
+--- +name: test_vreg_512_align2_w256 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_512_align2_w256 + ; CHECK: undef %3.sub0:vreg_256_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_256_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_256_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_256_align2 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_256_align2 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_256_align2 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: %3.sub6:vreg_256_align2 = V_MOV_B32_e32 6, implicit $exec + ; CHECK-NEXT: %3.sub7:vreg_256_align2 = V_MOV_B32_e32 7, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_256_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_256_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_256_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_256_align2 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_256_align2 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: %4.sub5:vreg_256_align2 = V_MOV_B32_e32 17, implicit $exec + ; CHECK-NEXT: %4.sub6:vreg_256_align2 = V_MOV_B32_e32 18, implicit $exec + ; CHECK-NEXT: %4.sub7:vreg_256_align2 = V_MOV_B32_e32 19, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_256_align2 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_256_align2 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_256_align2 = V_MOV_B32_e32 210, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_256_align2 = V_MOV_B32_e32 211, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_256_align2 = V_MOV_B32_e32 212, implicit $exec + ; CHECK-NEXT: %5.sub5:vreg_256_align2 = V_MOV_B32_e32 213, implicit $exec + ; CHECK-NEXT: %5.sub6:vreg_256_align2 = V_MOV_B32_e32 214, implicit $exec + ; CHECK-NEXT: %5.sub7:vreg_256_align2 = V_MOV_B32_e32 215, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_512_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_512_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_512_align2 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_512_align2 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_512_align2 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_512_align2 = V_MOV_B32_e32 05, implicit $exec + %0.sub6:vreg_512_align2 = V_MOV_B32_e32 06, implicit $exec + %0.sub7:vreg_512_align2 = V_MOV_B32_e32 07, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7 + + undef %1.sub2:vreg_512_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_512_align2 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_512_align2 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_512_align2 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_512_align2 = V_MOV_B32_e32 16, implicit $exec + %1.sub7:vreg_512_align2 = V_MOV_B32_e32 17, implicit $exec + %1.sub8:vreg_512_align2 = V_MOV_B32_e32 18, implicit $exec + %1.sub9:vreg_512_align2 = V_MOV_B32_e32 19, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9 + + undef %2.sub8:vreg_512_align2 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_512_align2 = V_MOV_B32_e32 29, implicit $exec + %2.sub10:vreg_512_align2 = V_MOV_B32_e32 210, implicit $exec + %2.sub11:vreg_512_align2 = V_MOV_B32_e32 211, implicit $exec + %2.sub12:vreg_512_align2 = V_MOV_B32_e32 212, implicit $exec + %2.sub13:vreg_512_align2 = V_MOV_B32_e32 213, implicit $exec + 
%2.sub14:vreg_512_align2 = V_MOV_B32_e32 214, implicit $exec + %2.sub15:vreg_512_align2 = V_MOV_B32_e32 215, implicit $exec + S_NOP 0, implicit %2.sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15 + +... +# Skip test_vreg_1024_align2_w32 +--- +name: test_vreg_1024_align2_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_1024_align2_w64 + ; CHECK: undef %3.sub0:vreg_64_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_64_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_64_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_64_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_64_align2 = V_MOV_B32_e32 230, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_64_align2 = V_MOV_B32_e32 231, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_1024_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_1024_align2 = V_MOV_B32_e32 01, implicit $exec + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub2:vreg_1024_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_1024_align2 = V_MOV_B32_e32 13, implicit $exec + S_NOP 0, implicit %1.sub2_sub3 + + undef %2.sub30:vreg_1024_align2 = V_MOV_B32_e32 230, implicit $exec + %2.sub31:vreg_1024_align2 = V_MOV_B32_e32 231, implicit $exec + S_NOP 0, implicit %2.sub30_sub31 + +... +# Skip test_vreg_1024_align2_w96 +--- +name: test_vreg_1024_align2_w128 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_1024_align2_w128 + ; CHECK: undef %3.sub0:vreg_128_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_128_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_128_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_128_align2 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_128_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_128_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_128_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_128_align2 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_128_align2 = V_MOV_B32_e32 228, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_128_align2 = V_MOV_B32_e32 229, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_128_align2 = V_MOV_B32_e32 230, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_128_align2 = V_MOV_B32_e32 231, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_1024_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_1024_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_1024_align2 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_1024_align2 = V_MOV_B32_e32 03, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3 + + undef %1.sub2:vreg_1024_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_1024_align2 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_1024_align2 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_1024_align2 = V_MOV_B32_e32 15, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4_sub5 + + undef %2.sub28:vreg_1024_align2 = V_MOV_B32_e32 228, implicit $exec + %2.sub29:vreg_1024_align2 = V_MOV_B32_e32 229, implicit $exec + %2.sub30:vreg_1024_align2 = V_MOV_B32_e32 230, implicit $exec + %2.sub31:vreg_1024_align2 = V_MOV_B32_e32 231, implicit $exec + S_NOP 0, implicit %2.sub28_sub29_sub30_sub31 + +... 
+# Skip test_vreg_1024_align2_w160 +--- +name: test_vreg_1024_align2_w192 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_1024_align2_w192 + ; CHECK: undef %3.sub0:vreg_192_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_192_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_192_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_192_align2 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_192_align2 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_192_align2 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_192_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_192_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_192_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_192_align2 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_192_align2 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: %4.sub5:vreg_192_align2 = V_MOV_B32_e32 17, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_192_align2 = V_MOV_B32_e32 226, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_192_align2 = V_MOV_B32_e32 227, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_192_align2 = V_MOV_B32_e32 228, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_192_align2 = V_MOV_B32_e32 229, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_192_align2 = V_MOV_B32_e32 230, implicit $exec + ; CHECK-NEXT: %5.sub5:vreg_192_align2 = V_MOV_B32_e32 231, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_1024_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_1024_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_1024_align2 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_1024_align2 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_1024_align2 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_1024_align2 = V_MOV_B32_e32 05, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5 + + undef %1.sub2:vreg_1024_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_1024_align2 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_1024_align2 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_1024_align2 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_1024_align2 = V_MOV_B32_e32 16, implicit $exec + %1.sub7:vreg_1024_align2 = V_MOV_B32_e32 17, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4_sub5_sub6_sub7 + + undef %2.sub26:vreg_1024_align2 = V_MOV_B32_e32 226, implicit $exec + %2.sub27:vreg_1024_align2 = V_MOV_B32_e32 227, implicit $exec + %2.sub28:vreg_1024_align2 = V_MOV_B32_e32 228, implicit $exec + %2.sub29:vreg_1024_align2 = V_MOV_B32_e32 229, implicit $exec + %2.sub30:vreg_1024_align2 = V_MOV_B32_e32 230, implicit $exec + %2.sub31:vreg_1024_align2 = V_MOV_B32_e32 231, implicit $exec + S_NOP 0, implicit %2.sub26_sub27_sub28_sub29_sub30_sub31 + +... 
+--- +name: test_vreg_1024_align2_w256 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_1024_align2_w256 + ; CHECK: undef %3.sub0:vreg_256_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_256_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_256_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_256_align2 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_256_align2 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_256_align2 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: %3.sub6:vreg_256_align2 = V_MOV_B32_e32 6, implicit $exec + ; CHECK-NEXT: %3.sub7:vreg_256_align2 = V_MOV_B32_e32 7, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_256_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_256_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_256_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_256_align2 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_256_align2 = V_MOV_B32_e32 16, implicit $exec + ; CHECK-NEXT: %4.sub5:vreg_256_align2 = V_MOV_B32_e32 17, implicit $exec + ; CHECK-NEXT: %4.sub6:vreg_256_align2 = V_MOV_B32_e32 18, implicit $exec + ; CHECK-NEXT: %4.sub7:vreg_256_align2 = V_MOV_B32_e32 19, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_256_align2 = V_MOV_B32_e32 224, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_256_align2 = V_MOV_B32_e32 225, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_256_align2 = V_MOV_B32_e32 226, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_256_align2 = V_MOV_B32_e32 227, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_256_align2 = V_MOV_B32_e32 228, implicit $exec + ; CHECK-NEXT: %5.sub5:vreg_256_align2 = V_MOV_B32_e32 229, implicit $exec + ; CHECK-NEXT: %5.sub6:vreg_256_align2 = V_MOV_B32_e32 230, implicit $exec + ; CHECK-NEXT: %5.sub7:vreg_256_align2 = V_MOV_B32_e32 231, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_1024_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_1024_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_1024_align2 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_1024_align2 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_1024_align2 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_1024_align2 = V_MOV_B32_e32 05, implicit $exec + %0.sub6:vreg_1024_align2 = V_MOV_B32_e32 06, implicit $exec + %0.sub7:vreg_1024_align2 = V_MOV_B32_e32 07, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7 + + undef %1.sub2:vreg_1024_align2 = V_MOV_B32_e32 12, implicit $exec + %1.sub3:vreg_1024_align2 = V_MOV_B32_e32 13, implicit $exec + %1.sub4:vreg_1024_align2 = V_MOV_B32_e32 14, implicit $exec + %1.sub5:vreg_1024_align2 = V_MOV_B32_e32 15, implicit $exec + %1.sub6:vreg_1024_align2 = V_MOV_B32_e32 16, implicit $exec + %1.sub7:vreg_1024_align2 = V_MOV_B32_e32 17, implicit $exec + %1.sub8:vreg_1024_align2 = V_MOV_B32_e32 18, implicit $exec + %1.sub9:vreg_1024_align2 = V_MOV_B32_e32 19, implicit $exec + S_NOP 0, implicit %1.sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9 + + undef %2.sub24:vreg_1024_align2 = V_MOV_B32_e32 224, implicit $exec + %2.sub25:vreg_1024_align2 = V_MOV_B32_e32 225, implicit $exec + %2.sub26:vreg_1024_align2 = V_MOV_B32_e32 226, implicit $exec + %2.sub27:vreg_1024_align2 = V_MOV_B32_e32 227, implicit $exec + %2.sub28:vreg_1024_align2 = V_MOV_B32_e32 228, implicit $exec + %2.sub29:vreg_1024_align2 = 
V_MOV_B32_e32 229, implicit $exec + %2.sub30:vreg_1024_align2 = V_MOV_B32_e32 230, implicit $exec + %2.sub31:vreg_1024_align2 = V_MOV_B32_e32 231, implicit $exec + S_NOP 0, implicit %2.sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31 + +... +--- +name: test_vreg_1024_align2_w512 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_vreg_1024_align2_w512 + ; CHECK: undef %3.sub0:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: %3.sub1:vreg_512_align2 = V_MOV_B32_e32 1, implicit $exec + ; CHECK-NEXT: %3.sub2:vreg_512_align2 = V_MOV_B32_e32 2, implicit $exec + ; CHECK-NEXT: %3.sub3:vreg_512_align2 = V_MOV_B32_e32 3, implicit $exec + ; CHECK-NEXT: %3.sub4:vreg_512_align2 = V_MOV_B32_e32 4, implicit $exec + ; CHECK-NEXT: %3.sub5:vreg_512_align2 = V_MOV_B32_e32 5, implicit $exec + ; CHECK-NEXT: %3.sub6:vreg_512_align2 = V_MOV_B32_e32 6, implicit $exec + ; CHECK-NEXT: %3.sub7:vreg_512_align2 = V_MOV_B32_e32 7, implicit $exec + ; CHECK-NEXT: %3.sub8:vreg_512_align2 = V_MOV_B32_e32 8, implicit $exec + ; CHECK-NEXT: %3.sub9:vreg_512_align2 = V_MOV_B32_e32 9, implicit $exec + ; CHECK-NEXT: %3.sub10:vreg_512_align2 = V_MOV_B32_e32 10, implicit $exec + ; CHECK-NEXT: %3.sub11:vreg_512_align2 = V_MOV_B32_e32 11, implicit $exec + ; CHECK-NEXT: %3.sub12:vreg_512_align2 = V_MOV_B32_e32 12, implicit $exec + ; CHECK-NEXT: %3.sub13:vreg_512_align2 = V_MOV_B32_e32 13, implicit $exec + ; CHECK-NEXT: %3.sub14:vreg_512_align2 = V_MOV_B32_e32 14, implicit $exec + ; CHECK-NEXT: %3.sub15:vreg_512_align2 = V_MOV_B32_e32 15, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:vreg_512_align2 = V_MOV_B32_e32 116, implicit $exec + ; CHECK-NEXT: %4.sub1:vreg_512_align2 = V_MOV_B32_e32 117, implicit $exec + ; CHECK-NEXT: %4.sub2:vreg_512_align2 = V_MOV_B32_e32 118, implicit $exec + ; CHECK-NEXT: %4.sub3:vreg_512_align2 = V_MOV_B32_e32 119, implicit $exec + ; CHECK-NEXT: %4.sub4:vreg_512_align2 = V_MOV_B32_e32 120, implicit $exec + ; CHECK-NEXT: %4.sub5:vreg_512_align2 = V_MOV_B32_e32 121, implicit $exec + ; CHECK-NEXT: %4.sub6:vreg_512_align2 = V_MOV_B32_e32 122, implicit $exec + ; CHECK-NEXT: %4.sub7:vreg_512_align2 = V_MOV_B32_e32 123, implicit $exec + ; CHECK-NEXT: %4.sub8:vreg_512_align2 = V_MOV_B32_e32 124, implicit $exec + ; CHECK-NEXT: %4.sub9:vreg_512_align2 = V_MOV_B32_e32 125, implicit $exec + ; CHECK-NEXT: %4.sub10:vreg_512_align2 = V_MOV_B32_e32 126, implicit $exec + ; CHECK-NEXT: %4.sub11:vreg_512_align2 = V_MOV_B32_e32 127, implicit $exec + ; CHECK-NEXT: %4.sub12:vreg_512_align2 = V_MOV_B32_e32 128, implicit $exec + ; CHECK-NEXT: %4.sub13:vreg_512_align2 = V_MOV_B32_e32 129, implicit $exec + ; CHECK-NEXT: %4.sub14:vreg_512_align2 = V_MOV_B32_e32 130, implicit $exec + ; CHECK-NEXT: %4.sub15:vreg_512_align2 = V_MOV_B32_e32 131, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:vreg_512_align2 = V_MOV_B32_e32 22, implicit $exec + ; CHECK-NEXT: %5.sub1:vreg_512_align2 = V_MOV_B32_e32 23, implicit $exec + ; CHECK-NEXT: %5.sub2:vreg_512_align2 = V_MOV_B32_e32 24, implicit $exec + ; CHECK-NEXT: %5.sub3:vreg_512_align2 = V_MOV_B32_e32 25, implicit $exec + ; CHECK-NEXT: %5.sub4:vreg_512_align2 = V_MOV_B32_e32 26, implicit $exec + ; CHECK-NEXT: %5.sub5:vreg_512_align2 = V_MOV_B32_e32 27, implicit $exec + ; CHECK-NEXT: %5.sub6:vreg_512_align2 = V_MOV_B32_e32 28, implicit $exec + ; CHECK-NEXT: %5.sub7:vreg_512_align2 = V_MOV_B32_e32 29, implicit $exec + ; CHECK-NEXT: %5.sub8:vreg_512_align2 = V_MOV_B32_e32 210, implicit $exec + 
; CHECK-NEXT: %5.sub9:vreg_512_align2 = V_MOV_B32_e32 211, implicit $exec + ; CHECK-NEXT: %5.sub10:vreg_512_align2 = V_MOV_B32_e32 212, implicit $exec + ; CHECK-NEXT: %5.sub11:vreg_512_align2 = V_MOV_B32_e32 213, implicit $exec + ; CHECK-NEXT: %5.sub12:vreg_512_align2 = V_MOV_B32_e32 214, implicit $exec + ; CHECK-NEXT: %5.sub13:vreg_512_align2 = V_MOV_B32_e32 215, implicit $exec + ; CHECK-NEXT: %5.sub14:vreg_512_align2 = V_MOV_B32_e32 216, implicit $exec + ; CHECK-NEXT: %5.sub15:vreg_512_align2 = V_MOV_B32_e32 217, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:vreg_1024_align2 = V_MOV_B32_e32 00, implicit $exec + %0.sub1:vreg_1024_align2 = V_MOV_B32_e32 01, implicit $exec + %0.sub2:vreg_1024_align2 = V_MOV_B32_e32 02, implicit $exec + %0.sub3:vreg_1024_align2 = V_MOV_B32_e32 03, implicit $exec + %0.sub4:vreg_1024_align2 = V_MOV_B32_e32 04, implicit $exec + %0.sub5:vreg_1024_align2 = V_MOV_B32_e32 05, implicit $exec + %0.sub6:vreg_1024_align2 = V_MOV_B32_e32 06, implicit $exec + %0.sub7:vreg_1024_align2 = V_MOV_B32_e32 07, implicit $exec + %0.sub8:vreg_1024_align2 = V_MOV_B32_e32 08, implicit $exec + %0.sub9:vreg_1024_align2 = V_MOV_B32_e32 09, implicit $exec + %0.sub10:vreg_1024_align2 = V_MOV_B32_e32 010, implicit $exec + %0.sub11:vreg_1024_align2 = V_MOV_B32_e32 011, implicit $exec + %0.sub12:vreg_1024_align2 = V_MOV_B32_e32 012, implicit $exec + %0.sub13:vreg_1024_align2 = V_MOV_B32_e32 013, implicit $exec + %0.sub14:vreg_1024_align2 = V_MOV_B32_e32 014, implicit $exec + %0.sub15:vreg_1024_align2 = V_MOV_B32_e32 015, implicit $exec + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15 + + undef %1.sub16:vreg_1024_align2 = V_MOV_B32_e32 116, implicit $exec + %1.sub17:vreg_1024_align2 = V_MOV_B32_e32 117, implicit $exec + %1.sub18:vreg_1024_align2 = V_MOV_B32_e32 118, implicit $exec + %1.sub19:vreg_1024_align2 = V_MOV_B32_e32 119, implicit $exec + %1.sub20:vreg_1024_align2 = V_MOV_B32_e32 120, implicit $exec + %1.sub21:vreg_1024_align2 = V_MOV_B32_e32 121, implicit $exec + %1.sub22:vreg_1024_align2 = V_MOV_B32_e32 122, implicit $exec + %1.sub23:vreg_1024_align2 = V_MOV_B32_e32 123, implicit $exec + %1.sub24:vreg_1024_align2 = V_MOV_B32_e32 124, implicit $exec + %1.sub25:vreg_1024_align2 = V_MOV_B32_e32 125, implicit $exec + %1.sub26:vreg_1024_align2 = V_MOV_B32_e32 126, implicit $exec + %1.sub27:vreg_1024_align2 = V_MOV_B32_e32 127, implicit $exec + %1.sub28:vreg_1024_align2 = V_MOV_B32_e32 128, implicit $exec + %1.sub29:vreg_1024_align2 = V_MOV_B32_e32 129, implicit $exec + %1.sub30:vreg_1024_align2 = V_MOV_B32_e32 130, implicit $exec + %1.sub31:vreg_1024_align2 = V_MOV_B32_e32 131, implicit $exec + S_NOP 0, implicit %1.sub16_sub17_sub18_sub19_sub20_sub21_sub22_sub23_sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31 + + undef %2.sub2:vreg_1024_align2 = V_MOV_B32_e32 22, implicit $exec + %2.sub3:vreg_1024_align2 = V_MOV_B32_e32 23, implicit $exec + %2.sub4:vreg_1024_align2 = V_MOV_B32_e32 24, implicit $exec + %2.sub5:vreg_1024_align2 = V_MOV_B32_e32 25, implicit $exec + %2.sub6:vreg_1024_align2 = V_MOV_B32_e32 26, implicit $exec + %2.sub7:vreg_1024_align2 = V_MOV_B32_e32 27, implicit $exec + %2.sub8:vreg_1024_align2 = V_MOV_B32_e32 28, implicit $exec + %2.sub9:vreg_1024_align2 = V_MOV_B32_e32 29, implicit $exec + %2.sub10:vreg_1024_align2 = V_MOV_B32_e32 210, implicit $exec + %2.sub11:vreg_1024_align2 = V_MOV_B32_e32 211, implicit $exec + %2.sub12:vreg_1024_align2 = V_MOV_B32_e32 212, implicit $exec + 
%2.sub13:vreg_1024_align2 = V_MOV_B32_e32 213, implicit $exec + %2.sub14:vreg_1024_align2 = V_MOV_B32_e32 214, implicit $exec + %2.sub15:vreg_1024_align2 = V_MOV_B32_e32 215, implicit $exec + %2.sub16:vreg_1024_align2 = V_MOV_B32_e32 216, implicit $exec + %2.sub17:vreg_1024_align2 = V_MOV_B32_e32 217, implicit $exec + S_NOP 0, implicit %2.sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15_sub16_sub17 + +... +--- +name: test_subregs_composition_sgpr_1024 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_subregs_composition_sgpr_1024 + ; CHECK: undef %2.sub0:sgpr_288 = S_MOV_B32 34 + ; CHECK-NEXT: %2.sub4:sgpr_288 = S_MOV_B32 38 + ; CHECK-NEXT: S_NOP 0, implicit %2.sub0_sub1_sub2_sub3_sub4 + ; CHECK-NEXT: S_NOP 0, implicit %2.sub4_sub5_sub6_sub7_sub8 + ; CHECK-NEXT: undef %3.sub0:sgpr_320 = S_MOV_B32 44 + ; CHECK-NEXT: %3.sub4:sgpr_320 = S_MOV_B32 48 + ; CHECK-NEXT: S_NOP 0, implicit %3.sub0_sub1_sub2_sub3_sub4_sub5 + ; CHECK-NEXT: S_NOP 0, implicit %3.sub4_sub5_sub6_sub7_sub8_sub9 + undef %3.sub4:sgpr_1024 = S_MOV_B32 34 + %3.sub8:sgpr_1024 = S_MOV_B32 38 + S_NOP 0, implicit %3.sub4_sub5_sub6_sub7_sub8 + S_NOP 0, implicit %3.sub8_sub9_sub10_sub11_sub12 + + undef %4.sub4:sgpr_1024 = S_MOV_B32 44 + %4.sub8:sgpr_1024 = S_MOV_B32 48 + S_NOP 0, implicit %4.sub4_sub5_sub6_sub7_sub8_sub9 + S_NOP 0, implicit %4.sub8_sub9_sub10_sub11_sub12_sub13 + +... +--- +name: test_sgpr_64_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_64_w32 + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 0 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_]] + ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 11 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_1]] + undef %0.sub0:sgpr_64 = S_MOV_B32 00 + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:sgpr_64 = S_MOV_B32 11 + S_NOP 0, implicit %1.sub1 + +... +--- +name: test_sgpr_96_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_96_w32 + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 0 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_]] + ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 11 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_1]] + ; CHECK-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sgpr_32 = S_MOV_B32 22 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_2]] + undef %0.sub0:sgpr_96 = S_MOV_B32 00 + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:sgpr_96 = S_MOV_B32 11 + S_NOP 0, implicit %1.sub1 + + undef %2.sub2:sgpr_96 = S_MOV_B32 22 + S_NOP 0, implicit %2.sub2 + +... +# Skip test_sgpr_96_w64 +--- +name: test_sgpr_128_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_128_w32 + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 0 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_]] + ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 11 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_1]] + ; CHECK-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sgpr_32 = S_MOV_B32 23 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_2]] + undef %0.sub0:sgpr_128 = S_MOV_B32 00 + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:sgpr_128 = S_MOV_B32 11 + S_NOP 0, implicit %1.sub1 + + undef %2.sub3:sgpr_128 = S_MOV_B32 23 + S_NOP 0, implicit %2.sub3 + +... 
+--- +name: test_sgpr_128_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_128_w64 + ; CHECK: undef %2.sub0:sgpr_64 = S_MOV_B32 0 + ; CHECK-NEXT: %2.sub1:sgpr_64 = S_MOV_B32 1 + ; CHECK-NEXT: S_NOP 0, implicit %2 + ; CHECK-NEXT: undef %3.sub0:sgpr_64 = S_MOV_B32 12 + ; CHECK-NEXT: %3.sub1:sgpr_64 = S_MOV_B32 13 + ; CHECK-NEXT: S_NOP 0, implicit %3 + undef %0.sub0:sgpr_128 = S_MOV_B32 00 + %0.sub1:sgpr_128 = S_MOV_B32 01 + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub2:sgpr_128 = S_MOV_B32 12 + %1.sub3:sgpr_128 = S_MOV_B32 13 + S_NOP 0, implicit %1.sub2_sub3 + +... +# Skip test_sgpr_128_w96 +--- +name: test_sgpr_160_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_160_w32 + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 0 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_]] + ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 11 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_1]] + ; CHECK-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sgpr_32 = S_MOV_B32 24 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_2]] + undef %0.sub0:sgpr_160 = S_MOV_B32 00 + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:sgpr_160 = S_MOV_B32 11 + S_NOP 0, implicit %1.sub1 + + undef %2.sub4:sgpr_160 = S_MOV_B32 24 + S_NOP 0, implicit %2.sub4 + +... +# Skip test_sgpr_160_w64 +# Skip test_sgpr_160_w96 +# Skip test_sgpr_160_w128 +--- +name: test_sgpr_192_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_192_w32 + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 0 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_]] + ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 11 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_1]] + ; CHECK-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sgpr_32 = S_MOV_B32 25 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_2]] + undef %0.sub0:sgpr_192 = S_MOV_B32 00 + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:sgpr_192 = S_MOV_B32 11 + S_NOP 0, implicit %1.sub1 + + undef %2.sub5:sgpr_192 = S_MOV_B32 25 + S_NOP 0, implicit %2.sub5 + +... +--- +name: test_sgpr_192_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_192_w64 + ; CHECK: undef %3.sub0:sgpr_64 = S_MOV_B32 0 + ; CHECK-NEXT: %3.sub1:sgpr_64 = S_MOV_B32 1 + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:sgpr_64 = S_MOV_B32 12 + ; CHECK-NEXT: %4.sub1:sgpr_64 = S_MOV_B32 13 + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:sgpr_64 = S_MOV_B32 24 + ; CHECK-NEXT: %5.sub1:sgpr_64 = S_MOV_B32 25 + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:sgpr_192 = S_MOV_B32 00 + %0.sub1:sgpr_192 = S_MOV_B32 01 + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub2:sgpr_192 = S_MOV_B32 12 + %1.sub3:sgpr_192 = S_MOV_B32 13 + S_NOP 0, implicit %1.sub2_sub3 + + undef %2.sub4:sgpr_192 = S_MOV_B32 24 + %2.sub5:sgpr_192 = S_MOV_B32 25 + S_NOP 0, implicit %2.sub4_sub5 + +... 
+# Skip test_sgpr_192_w96 +# Skip test_sgpr_192_w128 +# Skip test_sgpr_192_w160 +--- +name: test_sgpr_224_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_224_w32 + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 0 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_]] + ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 11 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_1]] + ; CHECK-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sgpr_32 = S_MOV_B32 26 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_2]] + undef %0.sub0:sgpr_224 = S_MOV_B32 00 + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:sgpr_224 = S_MOV_B32 11 + S_NOP 0, implicit %1.sub1 + + undef %2.sub6:sgpr_224 = S_MOV_B32 26 + S_NOP 0, implicit %2.sub6 + +... +# Skip test_sgpr_224_w64 +# Skip test_sgpr_224_w96 +# Skip test_sgpr_224_w128 +# Skip test_sgpr_224_w160 +# Skip test_sgpr_224_w192 +--- +name: test_sgpr_256_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_256_w32 + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 0 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_]] + ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 11 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_1]] + ; CHECK-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sgpr_32 = S_MOV_B32 27 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_2]] + undef %0.sub0:sgpr_256 = S_MOV_B32 00 + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:sgpr_256 = S_MOV_B32 11 + S_NOP 0, implicit %1.sub1 + + undef %2.sub7:sgpr_256 = S_MOV_B32 27 + S_NOP 0, implicit %2.sub7 + +... +--- +name: test_sgpr_256_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_256_w64 + ; CHECK: undef %3.sub0:sgpr_64 = S_MOV_B32 0 + ; CHECK-NEXT: %3.sub1:sgpr_64 = S_MOV_B32 1 + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:sgpr_64 = S_MOV_B32 12 + ; CHECK-NEXT: %4.sub1:sgpr_64 = S_MOV_B32 13 + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:sgpr_64 = S_MOV_B32 26 + ; CHECK-NEXT: %5.sub1:sgpr_64 = S_MOV_B32 27 + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:sgpr_256 = S_MOV_B32 00 + %0.sub1:sgpr_256 = S_MOV_B32 01 + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub2:sgpr_256 = S_MOV_B32 12 + %1.sub3:sgpr_256 = S_MOV_B32 13 + S_NOP 0, implicit %1.sub2_sub3 + + undef %2.sub6:sgpr_256 = S_MOV_B32 26 + %2.sub7:sgpr_256 = S_MOV_B32 27 + S_NOP 0, implicit %2.sub6_sub7 + +... +# Skip test_sgpr_256_w96 +--- +name: test_sgpr_256_w128 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_256_w128 + ; CHECK: undef %2.sub0:sgpr_128 = S_MOV_B32 0 + ; CHECK-NEXT: %2.sub1:sgpr_128 = S_MOV_B32 1 + ; CHECK-NEXT: %2.sub2:sgpr_128 = S_MOV_B32 2 + ; CHECK-NEXT: %2.sub3:sgpr_128 = S_MOV_B32 3 + ; CHECK-NEXT: S_NOP 0, implicit %2 + ; CHECK-NEXT: undef %3.sub0:sgpr_128 = S_MOV_B32 14 + ; CHECK-NEXT: %3.sub1:sgpr_128 = S_MOV_B32 15 + ; CHECK-NEXT: %3.sub2:sgpr_128 = S_MOV_B32 16 + ; CHECK-NEXT: %3.sub3:sgpr_128 = S_MOV_B32 17 + ; CHECK-NEXT: S_NOP 0, implicit %3 + undef %0.sub0:sgpr_256 = S_MOV_B32 00 + %0.sub1:sgpr_256 = S_MOV_B32 01 + %0.sub2:sgpr_256 = S_MOV_B32 02 + %0.sub3:sgpr_256 = S_MOV_B32 03 + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3 + + undef %1.sub4:sgpr_256 = S_MOV_B32 14 + %1.sub5:sgpr_256 = S_MOV_B32 15 + %1.sub6:sgpr_256 = S_MOV_B32 16 + %1.sub7:sgpr_256 = S_MOV_B32 17 + S_NOP 0, implicit %1.sub4_sub5_sub6_sub7 + +... 
+# Skip test_sgpr_256_w160 +# Skip test_sgpr_256_w192 +--- +name: test_sgpr_288_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_288_w32 + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 0 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_]] + ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 11 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_1]] + ; CHECK-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sgpr_32 = S_MOV_B32 28 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_2]] + undef %0.sub0:sgpr_288 = S_MOV_B32 00 + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:sgpr_288 = S_MOV_B32 11 + S_NOP 0, implicit %1.sub1 + + undef %2.sub8:sgpr_288 = S_MOV_B32 28 + S_NOP 0, implicit %2.sub8 + +... +# Skip test_sgpr_288_w64 +# Skip test_sgpr_288_w96 +# Skip test_sgpr_288_w128 +--- +name: test_sgpr_288_w160 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_288_w160 + ; CHECK: undef %2.sub0:sgpr_160 = S_MOV_B32 0 + ; CHECK-NEXT: %2.sub1:sgpr_160 = S_MOV_B32 1 + ; CHECK-NEXT: %2.sub2:sgpr_160 = S_MOV_B32 2 + ; CHECK-NEXT: %2.sub3:sgpr_160 = S_MOV_B32 3 + ; CHECK-NEXT: %2.sub4:sgpr_160 = S_MOV_B32 4 + ; CHECK-NEXT: S_NOP 0, implicit %2 + ; CHECK-NEXT: undef %3.sub0:sgpr_160 = S_MOV_B32 14 + ; CHECK-NEXT: %3.sub1:sgpr_160 = S_MOV_B32 15 + ; CHECK-NEXT: %3.sub2:sgpr_160 = S_MOV_B32 16 + ; CHECK-NEXT: %3.sub3:sgpr_160 = S_MOV_B32 17 + ; CHECK-NEXT: %3.sub4:sgpr_160 = S_MOV_B32 18 + ; CHECK-NEXT: S_NOP 0, implicit %3 + undef %0.sub0:sgpr_288 = S_MOV_B32 00 + %0.sub1:sgpr_288 = S_MOV_B32 01 + %0.sub2:sgpr_288 = S_MOV_B32 02 + %0.sub3:sgpr_288 = S_MOV_B32 03 + %0.sub4:sgpr_288 = S_MOV_B32 04 + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4 + + undef %1.sub4:sgpr_288 = S_MOV_B32 14 + %1.sub5:sgpr_288 = S_MOV_B32 15 + %1.sub6:sgpr_288 = S_MOV_B32 16 + %1.sub7:sgpr_288 = S_MOV_B32 17 + %1.sub8:sgpr_288 = S_MOV_B32 18 + S_NOP 0, implicit %1.sub4_sub5_sub6_sub7_sub8 + +... +# Skip test_sgpr_288_w192 +# Skip test_sgpr_288_w256 +--- +name: test_sgpr_320_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_320_w32 + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 0 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_]] + ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 11 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_1]] + ; CHECK-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sgpr_32 = S_MOV_B32 29 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_2]] + undef %0.sub0:sgpr_320 = S_MOV_B32 00 + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:sgpr_320 = S_MOV_B32 11 + S_NOP 0, implicit %1.sub1 + + undef %2.sub9:sgpr_320 = S_MOV_B32 29 + S_NOP 0, implicit %2.sub9 + +... +--- +name: test_sgpr_320_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_320_w64 + ; CHECK: undef %3.sub0:sgpr_64 = S_MOV_B32 0 + ; CHECK-NEXT: %3.sub1:sgpr_64 = S_MOV_B32 1 + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:sgpr_64 = S_MOV_B32 12 + ; CHECK-NEXT: %4.sub1:sgpr_64 = S_MOV_B32 13 + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:sgpr_64 = S_MOV_B32 28 + ; CHECK-NEXT: %5.sub1:sgpr_64 = S_MOV_B32 29 + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:sgpr_320 = S_MOV_B32 00 + %0.sub1:sgpr_320 = S_MOV_B32 01 + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub2:sgpr_320 = S_MOV_B32 12 + %1.sub3:sgpr_320 = S_MOV_B32 13 + S_NOP 0, implicit %1.sub2_sub3 + + undef %2.sub8:sgpr_320 = S_MOV_B32 28 + %2.sub9:sgpr_320 = S_MOV_B32 29 + S_NOP 0, implicit %2.sub8_sub9 + +... 
+# Skip test_sgpr_320_w96 +# Skip test_sgpr_320_w128 +# Skip test_sgpr_320_w160 +--- +name: test_sgpr_320_w192 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_320_w192 + ; CHECK: undef %2.sub0:sgpr_192 = S_MOV_B32 0 + ; CHECK-NEXT: %2.sub1:sgpr_192 = S_MOV_B32 1 + ; CHECK-NEXT: %2.sub2:sgpr_192 = S_MOV_B32 2 + ; CHECK-NEXT: %2.sub3:sgpr_192 = S_MOV_B32 3 + ; CHECK-NEXT: %2.sub4:sgpr_192 = S_MOV_B32 4 + ; CHECK-NEXT: %2.sub5:sgpr_192 = S_MOV_B32 5 + ; CHECK-NEXT: S_NOP 0, implicit %2 + ; CHECK-NEXT: undef %3.sub0:sgpr_192 = S_MOV_B32 14 + ; CHECK-NEXT: %3.sub1:sgpr_192 = S_MOV_B32 15 + ; CHECK-NEXT: %3.sub2:sgpr_192 = S_MOV_B32 16 + ; CHECK-NEXT: %3.sub3:sgpr_192 = S_MOV_B32 17 + ; CHECK-NEXT: %3.sub4:sgpr_192 = S_MOV_B32 18 + ; CHECK-NEXT: %3.sub5:sgpr_192 = S_MOV_B32 19 + ; CHECK-NEXT: S_NOP 0, implicit %3 + undef %0.sub0:sgpr_320 = S_MOV_B32 00 + %0.sub1:sgpr_320 = S_MOV_B32 01 + %0.sub2:sgpr_320 = S_MOV_B32 02 + %0.sub3:sgpr_320 = S_MOV_B32 03 + %0.sub4:sgpr_320 = S_MOV_B32 04 + %0.sub5:sgpr_320 = S_MOV_B32 05 + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5 + + undef %1.sub4:sgpr_320 = S_MOV_B32 14 + %1.sub5:sgpr_320 = S_MOV_B32 15 + %1.sub6:sgpr_320 = S_MOV_B32 16 + %1.sub7:sgpr_320 = S_MOV_B32 17 + %1.sub8:sgpr_320 = S_MOV_B32 18 + %1.sub9:sgpr_320 = S_MOV_B32 19 + S_NOP 0, implicit %1.sub4_sub5_sub6_sub7_sub8_sub9 + +... +# Skip test_sgpr_320_w256 +--- +name: test_sgpr_352_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_352_w32 + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 0 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_]] + ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 11 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_1]] + ; CHECK-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sgpr_32 = S_MOV_B32 210 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_2]] + undef %0.sub0:sgpr_352 = S_MOV_B32 00 + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:sgpr_352 = S_MOV_B32 11 + S_NOP 0, implicit %1.sub1 + + undef %2.sub10:sgpr_352 = S_MOV_B32 210 + S_NOP 0, implicit %2.sub10 + +... +# Skip test_sgpr_352_w64 +# Skip test_sgpr_352_w96 +# Skip test_sgpr_352_w128 +# Skip test_sgpr_352_w160 +# Skip test_sgpr_352_w192 +# Skip test_sgpr_352_w256 +--- +name: test_sgpr_384_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_384_w32 + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 0 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_]] + ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 11 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_1]] + ; CHECK-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sgpr_32 = S_MOV_B32 211 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_2]] + undef %0.sub0:sgpr_384 = S_MOV_B32 00 + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:sgpr_384 = S_MOV_B32 11 + S_NOP 0, implicit %1.sub1 + + undef %2.sub11:sgpr_384 = S_MOV_B32 211 + S_NOP 0, implicit %2.sub11 + +... 
+--- +name: test_sgpr_384_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_384_w64 + ; CHECK: undef %3.sub0:sgpr_64 = S_MOV_B32 0 + ; CHECK-NEXT: %3.sub1:sgpr_64 = S_MOV_B32 1 + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:sgpr_64 = S_MOV_B32 110 + ; CHECK-NEXT: %4.sub1:sgpr_64 = S_MOV_B32 111 + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:sgpr_64 = S_MOV_B32 22 + ; CHECK-NEXT: %5.sub1:sgpr_64 = S_MOV_B32 23 + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:sgpr_384 = S_MOV_B32 00 + %0.sub1:sgpr_384 = S_MOV_B32 01 + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub10:sgpr_384 = S_MOV_B32 110 + %1.sub11:sgpr_384 = S_MOV_B32 111 + S_NOP 0, implicit %1.sub10_sub11 + + undef %2.sub2:sgpr_384 = S_MOV_B32 22 + %2.sub3:sgpr_384 = S_MOV_B32 23 + S_NOP 0, implicit %2.sub2_sub3 + +... +# Skip test_sgpr_384_w96 +--- +name: test_sgpr_384_w128 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_384_w128 + ; CHECK: undef %3.sub0:sgpr_128 = S_MOV_B32 0 + ; CHECK-NEXT: %3.sub1:sgpr_128 = S_MOV_B32 1 + ; CHECK-NEXT: %3.sub2:sgpr_128 = S_MOV_B32 2 + ; CHECK-NEXT: %3.sub3:sgpr_128 = S_MOV_B32 3 + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:sgpr_128 = S_MOV_B32 14 + ; CHECK-NEXT: %4.sub1:sgpr_128 = S_MOV_B32 15 + ; CHECK-NEXT: %4.sub2:sgpr_128 = S_MOV_B32 16 + ; CHECK-NEXT: %4.sub3:sgpr_128 = S_MOV_B32 17 + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:sgpr_128 = S_MOV_B32 28 + ; CHECK-NEXT: %5.sub1:sgpr_128 = S_MOV_B32 29 + ; CHECK-NEXT: %5.sub2:sgpr_128 = S_MOV_B32 210 + ; CHECK-NEXT: %5.sub3:sgpr_128 = S_MOV_B32 211 + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:sgpr_384 = S_MOV_B32 00 + %0.sub1:sgpr_384 = S_MOV_B32 01 + %0.sub2:sgpr_384 = S_MOV_B32 02 + %0.sub3:sgpr_384 = S_MOV_B32 03 + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3 + + undef %1.sub4:sgpr_384 = S_MOV_B32 14 + %1.sub5:sgpr_384 = S_MOV_B32 15 + %1.sub6:sgpr_384 = S_MOV_B32 16 + %1.sub7:sgpr_384 = S_MOV_B32 17 + S_NOP 0, implicit %1.sub4_sub5_sub6_sub7 + + undef %2.sub8:sgpr_384 = S_MOV_B32 28 + %2.sub9:sgpr_384 = S_MOV_B32 29 + %2.sub10:sgpr_384 = S_MOV_B32 210 + %2.sub11:sgpr_384 = S_MOV_B32 211 + S_NOP 0, implicit %2.sub8_sub9_sub10_sub11 + +... 
+# Skip test_sgpr_384_w160 +# Skip test_sgpr_384_w192 +--- +name: test_sgpr_384_w256 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_384_w256 + ; CHECK: undef %2.sub0:sgpr_256 = S_MOV_B32 0 + ; CHECK-NEXT: %2.sub1:sgpr_256 = S_MOV_B32 1 + ; CHECK-NEXT: %2.sub2:sgpr_256 = S_MOV_B32 2 + ; CHECK-NEXT: %2.sub3:sgpr_256 = S_MOV_B32 3 + ; CHECK-NEXT: %2.sub4:sgpr_256 = S_MOV_B32 4 + ; CHECK-NEXT: %2.sub5:sgpr_256 = S_MOV_B32 5 + ; CHECK-NEXT: %2.sub6:sgpr_256 = S_MOV_B32 6 + ; CHECK-NEXT: %2.sub7:sgpr_256 = S_MOV_B32 7 + ; CHECK-NEXT: S_NOP 0, implicit %2 + ; CHECK-NEXT: undef %3.sub0:sgpr_256 = S_MOV_B32 14 + ; CHECK-NEXT: %3.sub1:sgpr_256 = S_MOV_B32 15 + ; CHECK-NEXT: %3.sub2:sgpr_256 = S_MOV_B32 16 + ; CHECK-NEXT: %3.sub3:sgpr_256 = S_MOV_B32 17 + ; CHECK-NEXT: %3.sub4:sgpr_256 = S_MOV_B32 18 + ; CHECK-NEXT: %3.sub5:sgpr_256 = S_MOV_B32 19 + ; CHECK-NEXT: %3.sub6:sgpr_256 = S_MOV_B32 110 + ; CHECK-NEXT: %3.sub7:sgpr_256 = S_MOV_B32 111 + ; CHECK-NEXT: S_NOP 0, implicit %3 + undef %0.sub0:sgpr_384 = S_MOV_B32 00 + %0.sub1:sgpr_384 = S_MOV_B32 01 + %0.sub2:sgpr_384 = S_MOV_B32 02 + %0.sub3:sgpr_384 = S_MOV_B32 03 + %0.sub4:sgpr_384 = S_MOV_B32 04 + %0.sub5:sgpr_384 = S_MOV_B32 05 + %0.sub6:sgpr_384 = S_MOV_B32 06 + %0.sub7:sgpr_384 = S_MOV_B32 07 + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7 + + undef %1.sub4:sgpr_384 = S_MOV_B32 14 + %1.sub5:sgpr_384 = S_MOV_B32 15 + %1.sub6:sgpr_384 = S_MOV_B32 16 + %1.sub7:sgpr_384 = S_MOV_B32 17 + %1.sub8:sgpr_384 = S_MOV_B32 18 + %1.sub9:sgpr_384 = S_MOV_B32 19 + %1.sub10:sgpr_384 = S_MOV_B32 110 + %1.sub11:sgpr_384 = S_MOV_B32 111 + S_NOP 0, implicit %1.sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11 + +... +--- +name: test_sgpr_512_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_512_w32 + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 0 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_]] + ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 11 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_1]] + ; CHECK-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sgpr_32 = S_MOV_B32 215 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_2]] + undef %0.sub0:sgpr_512 = S_MOV_B32 00 + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:sgpr_512 = S_MOV_B32 11 + S_NOP 0, implicit %1.sub1 + + undef %2.sub15:sgpr_512 = S_MOV_B32 215 + S_NOP 0, implicit %2.sub15 + +... +--- +name: test_sgpr_512_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_512_w64 + ; CHECK: undef %3.sub0:sgpr_64 = S_MOV_B32 0 + ; CHECK-NEXT: %3.sub1:sgpr_64 = S_MOV_B32 1 + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:sgpr_64 = S_MOV_B32 114 + ; CHECK-NEXT: %4.sub1:sgpr_64 = S_MOV_B32 115 + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:sgpr_64 = S_MOV_B32 22 + ; CHECK-NEXT: %5.sub1:sgpr_64 = S_MOV_B32 23 + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:sgpr_512 = S_MOV_B32 00 + %0.sub1:sgpr_512 = S_MOV_B32 01 + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub14:sgpr_512 = S_MOV_B32 114 + %1.sub15:sgpr_512 = S_MOV_B32 115 + S_NOP 0, implicit %1.sub14_sub15 + + undef %2.sub2:sgpr_512 = S_MOV_B32 22 + %2.sub3:sgpr_512 = S_MOV_B32 23 + S_NOP 0, implicit %2.sub2_sub3 + +... 
+# Skip test_sgpr_512_w96 +--- +name: test_sgpr_512_w128 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_512_w128 + ; CHECK: undef %3.sub0:sgpr_128 = S_MOV_B32 0 + ; CHECK-NEXT: %3.sub1:sgpr_128 = S_MOV_B32 1 + ; CHECK-NEXT: %3.sub2:sgpr_128 = S_MOV_B32 2 + ; CHECK-NEXT: %3.sub3:sgpr_128 = S_MOV_B32 3 + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:sgpr_128 = S_MOV_B32 112 + ; CHECK-NEXT: %4.sub1:sgpr_128 = S_MOV_B32 113 + ; CHECK-NEXT: %4.sub2:sgpr_128 = S_MOV_B32 114 + ; CHECK-NEXT: %4.sub3:sgpr_128 = S_MOV_B32 115 + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:sgpr_128 = S_MOV_B32 24 + ; CHECK-NEXT: %5.sub1:sgpr_128 = S_MOV_B32 25 + ; CHECK-NEXT: %5.sub2:sgpr_128 = S_MOV_B32 26 + ; CHECK-NEXT: %5.sub3:sgpr_128 = S_MOV_B32 27 + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:sgpr_512 = S_MOV_B32 00 + %0.sub1:sgpr_512 = S_MOV_B32 01 + %0.sub2:sgpr_512 = S_MOV_B32 02 + %0.sub3:sgpr_512 = S_MOV_B32 03 + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3 + + undef %1.sub12:sgpr_512 = S_MOV_B32 112 + %1.sub13:sgpr_512 = S_MOV_B32 113 + %1.sub14:sgpr_512 = S_MOV_B32 114 + %1.sub15:sgpr_512 = S_MOV_B32 115 + S_NOP 0, implicit %1.sub12_sub13_sub14_sub15 + + undef %2.sub4:sgpr_512 = S_MOV_B32 24 + %2.sub5:sgpr_512 = S_MOV_B32 25 + %2.sub6:sgpr_512 = S_MOV_B32 26 + %2.sub7:sgpr_512 = S_MOV_B32 27 + S_NOP 0, implicit %2.sub4_sub5_sub6_sub7 + +... +# Skip test_sgpr_512_w160 +# Skip test_sgpr_512_w192 +--- +name: test_sgpr_512_w256 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_512_w256 + ; CHECK: undef %3.sub0:sgpr_256 = S_MOV_B32 0 + ; CHECK-NEXT: %3.sub1:sgpr_256 = S_MOV_B32 1 + ; CHECK-NEXT: %3.sub2:sgpr_256 = S_MOV_B32 2 + ; CHECK-NEXT: %3.sub3:sgpr_256 = S_MOV_B32 3 + ; CHECK-NEXT: %3.sub4:sgpr_256 = S_MOV_B32 4 + ; CHECK-NEXT: %3.sub5:sgpr_256 = S_MOV_B32 5 + ; CHECK-NEXT: %3.sub6:sgpr_256 = S_MOV_B32 6 + ; CHECK-NEXT: %3.sub7:sgpr_256 = S_MOV_B32 7 + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:sgpr_256 = S_MOV_B32 14 + ; CHECK-NEXT: %4.sub1:sgpr_256 = S_MOV_B32 15 + ; CHECK-NEXT: %4.sub2:sgpr_256 = S_MOV_B32 16 + ; CHECK-NEXT: %4.sub3:sgpr_256 = S_MOV_B32 17 + ; CHECK-NEXT: %4.sub4:sgpr_256 = S_MOV_B32 18 + ; CHECK-NEXT: %4.sub5:sgpr_256 = S_MOV_B32 19 + ; CHECK-NEXT: %4.sub6:sgpr_256 = S_MOV_B32 110 + ; CHECK-NEXT: %4.sub7:sgpr_256 = S_MOV_B32 111 + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:sgpr_256 = S_MOV_B32 28 + ; CHECK-NEXT: %5.sub1:sgpr_256 = S_MOV_B32 29 + ; CHECK-NEXT: %5.sub2:sgpr_256 = S_MOV_B32 210 + ; CHECK-NEXT: %5.sub3:sgpr_256 = S_MOV_B32 211 + ; CHECK-NEXT: %5.sub4:sgpr_256 = S_MOV_B32 212 + ; CHECK-NEXT: %5.sub5:sgpr_256 = S_MOV_B32 213 + ; CHECK-NEXT: %5.sub6:sgpr_256 = S_MOV_B32 214 + ; CHECK-NEXT: %5.sub7:sgpr_256 = S_MOV_B32 215 + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:sgpr_512 = S_MOV_B32 00 + %0.sub1:sgpr_512 = S_MOV_B32 01 + %0.sub2:sgpr_512 = S_MOV_B32 02 + %0.sub3:sgpr_512 = S_MOV_B32 03 + %0.sub4:sgpr_512 = S_MOV_B32 04 + %0.sub5:sgpr_512 = S_MOV_B32 05 + %0.sub6:sgpr_512 = S_MOV_B32 06 + %0.sub7:sgpr_512 = S_MOV_B32 07 + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7 + + undef %1.sub4:sgpr_512 = S_MOV_B32 14 + %1.sub5:sgpr_512 = S_MOV_B32 15 + %1.sub6:sgpr_512 = S_MOV_B32 16 + %1.sub7:sgpr_512 = S_MOV_B32 17 + %1.sub8:sgpr_512 = S_MOV_B32 18 + %1.sub9:sgpr_512 = S_MOV_B32 19 + %1.sub10:sgpr_512 = S_MOV_B32 110 + %1.sub11:sgpr_512 = S_MOV_B32 111 + S_NOP 0, implicit %1.sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11 
+ + undef %2.sub8:sgpr_512 = S_MOV_B32 28 + %2.sub9:sgpr_512 = S_MOV_B32 29 + %2.sub10:sgpr_512 = S_MOV_B32 210 + %2.sub11:sgpr_512 = S_MOV_B32 211 + %2.sub12:sgpr_512 = S_MOV_B32 212 + %2.sub13:sgpr_512 = S_MOV_B32 213 + %2.sub14:sgpr_512 = S_MOV_B32 214 + %2.sub15:sgpr_512 = S_MOV_B32 215 + S_NOP 0, implicit %2.sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15 + +... +--- +name: test_sgpr_1024_w32 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_1024_w32 + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 0 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_]] + ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 11 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_1]] + ; CHECK-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sgpr_32 = S_MOV_B32 231 + ; CHECK-NEXT: S_NOP 0, implicit [[S_MOV_B32_2]] + undef %0.sub0:sgpr_1024 = S_MOV_B32 00 + S_NOP 0, implicit %0.sub0 + + undef %1.sub1:sgpr_1024 = S_MOV_B32 11 + S_NOP 0, implicit %1.sub1 + + undef %2.sub31:sgpr_1024 = S_MOV_B32 231 + S_NOP 0, implicit %2.sub31 + +... +--- +name: test_sgpr_1024_w64 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_1024_w64 + ; CHECK: undef %3.sub0:sgpr_64 = S_MOV_B32 0 + ; CHECK-NEXT: %3.sub1:sgpr_64 = S_MOV_B32 1 + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:sgpr_64 = S_MOV_B32 12 + ; CHECK-NEXT: %4.sub1:sgpr_64 = S_MOV_B32 13 + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:sgpr_64 = S_MOV_B32 230 + ; CHECK-NEXT: %5.sub1:sgpr_64 = S_MOV_B32 231 + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:sgpr_1024 = S_MOV_B32 00 + %0.sub1:sgpr_1024 = S_MOV_B32 01 + S_NOP 0, implicit %0.sub0_sub1 + + undef %1.sub2:sgpr_1024 = S_MOV_B32 12 + %1.sub3:sgpr_1024 = S_MOV_B32 13 + S_NOP 0, implicit %1.sub2_sub3 + + undef %2.sub30:sgpr_1024 = S_MOV_B32 230 + %2.sub31:sgpr_1024 = S_MOV_B32 231 + S_NOP 0, implicit %2.sub30_sub31 + +... +# Skip test_sgpr_1024_w96 +--- +name: test_sgpr_1024_w128 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_1024_w128 + ; CHECK: undef %3.sub0:sgpr_128 = S_MOV_B32 0 + ; CHECK-NEXT: %3.sub1:sgpr_128 = S_MOV_B32 1 + ; CHECK-NEXT: %3.sub2:sgpr_128 = S_MOV_B32 2 + ; CHECK-NEXT: %3.sub3:sgpr_128 = S_MOV_B32 3 + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:sgpr_128 = S_MOV_B32 128 + ; CHECK-NEXT: %4.sub1:sgpr_128 = S_MOV_B32 129 + ; CHECK-NEXT: %4.sub2:sgpr_128 = S_MOV_B32 130 + ; CHECK-NEXT: %4.sub3:sgpr_128 = S_MOV_B32 131 + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:sgpr_128 = S_MOV_B32 24 + ; CHECK-NEXT: %5.sub1:sgpr_128 = S_MOV_B32 25 + ; CHECK-NEXT: %5.sub2:sgpr_128 = S_MOV_B32 26 + ; CHECK-NEXT: %5.sub3:sgpr_128 = S_MOV_B32 27 + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:sgpr_1024 = S_MOV_B32 00 + %0.sub1:sgpr_1024 = S_MOV_B32 01 + %0.sub2:sgpr_1024 = S_MOV_B32 02 + %0.sub3:sgpr_1024 = S_MOV_B32 03 + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3 + + undef %1.sub28:sgpr_1024 = S_MOV_B32 128 + %1.sub29:sgpr_1024 = S_MOV_B32 129 + %1.sub30:sgpr_1024 = S_MOV_B32 130 + %1.sub31:sgpr_1024 = S_MOV_B32 131 + S_NOP 0, implicit %1.sub28_sub29_sub30_sub31 + + undef %2.sub4:sgpr_1024 = S_MOV_B32 24 + %2.sub5:sgpr_1024 = S_MOV_B32 25 + %2.sub6:sgpr_1024 = S_MOV_B32 26 + %2.sub7:sgpr_1024 = S_MOV_B32 27 + S_NOP 0, implicit %2.sub4_sub5_sub6_sub7 + +... 
+# Skip test_sgpr_1024_w160 +# Skip test_sgpr_1024_w192 +--- +name: test_sgpr_1024_w256 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_1024_w256 + ; CHECK: undef %3.sub0:sgpr_256 = S_MOV_B32 0 + ; CHECK-NEXT: %3.sub1:sgpr_256 = S_MOV_B32 1 + ; CHECK-NEXT: %3.sub2:sgpr_256 = S_MOV_B32 2 + ; CHECK-NEXT: %3.sub3:sgpr_256 = S_MOV_B32 3 + ; CHECK-NEXT: %3.sub4:sgpr_256 = S_MOV_B32 4 + ; CHECK-NEXT: %3.sub5:sgpr_256 = S_MOV_B32 5 + ; CHECK-NEXT: %3.sub6:sgpr_256 = S_MOV_B32 6 + ; CHECK-NEXT: %3.sub7:sgpr_256 = S_MOV_B32 7 + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:sgpr_256 = S_MOV_B32 124 + ; CHECK-NEXT: %4.sub1:sgpr_256 = S_MOV_B32 125 + ; CHECK-NEXT: %4.sub2:sgpr_256 = S_MOV_B32 126 + ; CHECK-NEXT: %4.sub3:sgpr_256 = S_MOV_B32 127 + ; CHECK-NEXT: %4.sub4:sgpr_256 = S_MOV_B32 128 + ; CHECK-NEXT: %4.sub5:sgpr_256 = S_MOV_B32 129 + ; CHECK-NEXT: %4.sub6:sgpr_256 = S_MOV_B32 130 + ; CHECK-NEXT: %4.sub7:sgpr_256 = S_MOV_B32 131 + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:sgpr_256 = S_MOV_B32 24 + ; CHECK-NEXT: %5.sub1:sgpr_256 = S_MOV_B32 25 + ; CHECK-NEXT: %5.sub2:sgpr_256 = S_MOV_B32 26 + ; CHECK-NEXT: %5.sub3:sgpr_256 = S_MOV_B32 27 + ; CHECK-NEXT: %5.sub4:sgpr_256 = S_MOV_B32 28 + ; CHECK-NEXT: %5.sub5:sgpr_256 = S_MOV_B32 29 + ; CHECK-NEXT: %5.sub6:sgpr_256 = S_MOV_B32 210 + ; CHECK-NEXT: %5.sub7:sgpr_256 = S_MOV_B32 211 + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:sgpr_1024 = S_MOV_B32 00 + %0.sub1:sgpr_1024 = S_MOV_B32 01 + %0.sub2:sgpr_1024 = S_MOV_B32 02 + %0.sub3:sgpr_1024 = S_MOV_B32 03 + %0.sub4:sgpr_1024 = S_MOV_B32 04 + %0.sub5:sgpr_1024 = S_MOV_B32 05 + %0.sub6:sgpr_1024 = S_MOV_B32 06 + %0.sub7:sgpr_1024 = S_MOV_B32 07 + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7 + + undef %1.sub24:sgpr_1024 = S_MOV_B32 124 + %1.sub25:sgpr_1024 = S_MOV_B32 125 + %1.sub26:sgpr_1024 = S_MOV_B32 126 + %1.sub27:sgpr_1024 = S_MOV_B32 127 + %1.sub28:sgpr_1024 = S_MOV_B32 128 + %1.sub29:sgpr_1024 = S_MOV_B32 129 + %1.sub30:sgpr_1024 = S_MOV_B32 130 + %1.sub31:sgpr_1024 = S_MOV_B32 131 + S_NOP 0, implicit %1.sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31 + + undef %2.sub4:sgpr_1024 = S_MOV_B32 24 + %2.sub5:sgpr_1024 = S_MOV_B32 25 + %2.sub6:sgpr_1024 = S_MOV_B32 26 + %2.sub7:sgpr_1024 = S_MOV_B32 27 + %2.sub8:sgpr_1024 = S_MOV_B32 28 + %2.sub9:sgpr_1024 = S_MOV_B32 29 + %2.sub10:sgpr_1024 = S_MOV_B32 210 + %2.sub11:sgpr_1024 = S_MOV_B32 211 + S_NOP 0, implicit %2.sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11 + +... 
+--- +name: test_sgpr_1024_w512 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_sgpr_1024_w512 + ; CHECK: undef %3.sub0:sgpr_512 = S_MOV_B32 0 + ; CHECK-NEXT: %3.sub1:sgpr_512 = S_MOV_B32 1 + ; CHECK-NEXT: %3.sub2:sgpr_512 = S_MOV_B32 2 + ; CHECK-NEXT: %3.sub3:sgpr_512 = S_MOV_B32 3 + ; CHECK-NEXT: %3.sub4:sgpr_512 = S_MOV_B32 4 + ; CHECK-NEXT: %3.sub5:sgpr_512 = S_MOV_B32 5 + ; CHECK-NEXT: %3.sub6:sgpr_512 = S_MOV_B32 6 + ; CHECK-NEXT: %3.sub7:sgpr_512 = S_MOV_B32 7 + ; CHECK-NEXT: %3.sub8:sgpr_512 = S_MOV_B32 8 + ; CHECK-NEXT: %3.sub9:sgpr_512 = S_MOV_B32 9 + ; CHECK-NEXT: %3.sub10:sgpr_512 = S_MOV_B32 10 + ; CHECK-NEXT: %3.sub11:sgpr_512 = S_MOV_B32 11 + ; CHECK-NEXT: %3.sub12:sgpr_512 = S_MOV_B32 12 + ; CHECK-NEXT: %3.sub13:sgpr_512 = S_MOV_B32 13 + ; CHECK-NEXT: %3.sub14:sgpr_512 = S_MOV_B32 14 + ; CHECK-NEXT: %3.sub15:sgpr_512 = S_MOV_B32 15 + ; CHECK-NEXT: S_NOP 0, implicit %3 + ; CHECK-NEXT: undef %4.sub0:sgpr_512 = S_MOV_B32 116 + ; CHECK-NEXT: %4.sub1:sgpr_512 = S_MOV_B32 117 + ; CHECK-NEXT: %4.sub2:sgpr_512 = S_MOV_B32 118 + ; CHECK-NEXT: %4.sub3:sgpr_512 = S_MOV_B32 119 + ; CHECK-NEXT: %4.sub4:sgpr_512 = S_MOV_B32 120 + ; CHECK-NEXT: %4.sub5:sgpr_512 = S_MOV_B32 121 + ; CHECK-NEXT: %4.sub6:sgpr_512 = S_MOV_B32 122 + ; CHECK-NEXT: %4.sub7:sgpr_512 = S_MOV_B32 123 + ; CHECK-NEXT: %4.sub8:sgpr_512 = S_MOV_B32 124 + ; CHECK-NEXT: %4.sub9:sgpr_512 = S_MOV_B32 125 + ; CHECK-NEXT: %4.sub10:sgpr_512 = S_MOV_B32 126 + ; CHECK-NEXT: %4.sub11:sgpr_512 = S_MOV_B32 127 + ; CHECK-NEXT: %4.sub12:sgpr_512 = S_MOV_B32 128 + ; CHECK-NEXT: %4.sub13:sgpr_512 = S_MOV_B32 129 + ; CHECK-NEXT: %4.sub14:sgpr_512 = S_MOV_B32 130 + ; CHECK-NEXT: %4.sub15:sgpr_512 = S_MOV_B32 131 + ; CHECK-NEXT: S_NOP 0, implicit %4 + ; CHECK-NEXT: undef %5.sub0:sgpr_512 = S_MOV_B32 24 + ; CHECK-NEXT: %5.sub1:sgpr_512 = S_MOV_B32 25 + ; CHECK-NEXT: %5.sub2:sgpr_512 = S_MOV_B32 26 + ; CHECK-NEXT: %5.sub3:sgpr_512 = S_MOV_B32 27 + ; CHECK-NEXT: %5.sub4:sgpr_512 = S_MOV_B32 28 + ; CHECK-NEXT: %5.sub5:sgpr_512 = S_MOV_B32 29 + ; CHECK-NEXT: %5.sub6:sgpr_512 = S_MOV_B32 210 + ; CHECK-NEXT: %5.sub7:sgpr_512 = S_MOV_B32 211 + ; CHECK-NEXT: %5.sub8:sgpr_512 = S_MOV_B32 212 + ; CHECK-NEXT: %5.sub9:sgpr_512 = S_MOV_B32 213 + ; CHECK-NEXT: %5.sub10:sgpr_512 = S_MOV_B32 214 + ; CHECK-NEXT: %5.sub11:sgpr_512 = S_MOV_B32 215 + ; CHECK-NEXT: %5.sub12:sgpr_512 = S_MOV_B32 216 + ; CHECK-NEXT: %5.sub13:sgpr_512 = S_MOV_B32 217 + ; CHECK-NEXT: %5.sub14:sgpr_512 = S_MOV_B32 218 + ; CHECK-NEXT: %5.sub15:sgpr_512 = S_MOV_B32 219 + ; CHECK-NEXT: S_NOP 0, implicit %5 + undef %0.sub0:sgpr_1024 = S_MOV_B32 00 + %0.sub1:sgpr_1024 = S_MOV_B32 01 + %0.sub2:sgpr_1024 = S_MOV_B32 02 + %0.sub3:sgpr_1024 = S_MOV_B32 03 + %0.sub4:sgpr_1024 = S_MOV_B32 04 + %0.sub5:sgpr_1024 = S_MOV_B32 05 + %0.sub6:sgpr_1024 = S_MOV_B32 06 + %0.sub7:sgpr_1024 = S_MOV_B32 07 + %0.sub8:sgpr_1024 = S_MOV_B32 08 + %0.sub9:sgpr_1024 = S_MOV_B32 09 + %0.sub10:sgpr_1024 = S_MOV_B32 010 + %0.sub11:sgpr_1024 = S_MOV_B32 011 + %0.sub12:sgpr_1024 = S_MOV_B32 012 + %0.sub13:sgpr_1024 = S_MOV_B32 013 + %0.sub14:sgpr_1024 = S_MOV_B32 014 + %0.sub15:sgpr_1024 = S_MOV_B32 015 + S_NOP 0, implicit %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15 + + undef %1.sub16:sgpr_1024 = S_MOV_B32 116 + %1.sub17:sgpr_1024 = S_MOV_B32 117 + %1.sub18:sgpr_1024 = S_MOV_B32 118 + %1.sub19:sgpr_1024 = S_MOV_B32 119 + %1.sub20:sgpr_1024 = S_MOV_B32 120 + %1.sub21:sgpr_1024 = S_MOV_B32 121 + %1.sub22:sgpr_1024 = S_MOV_B32 122 + %1.sub23:sgpr_1024 
= S_MOV_B32 123 + %1.sub24:sgpr_1024 = S_MOV_B32 124 + %1.sub25:sgpr_1024 = S_MOV_B32 125 + %1.sub26:sgpr_1024 = S_MOV_B32 126 + %1.sub27:sgpr_1024 = S_MOV_B32 127 + %1.sub28:sgpr_1024 = S_MOV_B32 128 + %1.sub29:sgpr_1024 = S_MOV_B32 129 + %1.sub30:sgpr_1024 = S_MOV_B32 130 + %1.sub31:sgpr_1024 = S_MOV_B32 131 + S_NOP 0, implicit %1.sub16_sub17_sub18_sub19_sub20_sub21_sub22_sub23_sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31 + + undef %2.sub4:sgpr_1024 = S_MOV_B32 24 + %2.sub5:sgpr_1024 = S_MOV_B32 25 + %2.sub6:sgpr_1024 = S_MOV_B32 26 + %2.sub7:sgpr_1024 = S_MOV_B32 27 + %2.sub8:sgpr_1024 = S_MOV_B32 28 + %2.sub9:sgpr_1024 = S_MOV_B32 29 + %2.sub10:sgpr_1024 = S_MOV_B32 210 + %2.sub11:sgpr_1024 = S_MOV_B32 211 + %2.sub12:sgpr_1024 = S_MOV_B32 212 + %2.sub13:sgpr_1024 = S_MOV_B32 213 + %2.sub14:sgpr_1024 = S_MOV_B32 214 + %2.sub15:sgpr_1024 = S_MOV_B32 215 + %2.sub16:sgpr_1024 = S_MOV_B32 216 + %2.sub17:sgpr_1024 = S_MOV_B32 217 + %2.sub18:sgpr_1024 = S_MOV_B32 218 + %2.sub19:sgpr_1024 = S_MOV_B32 219 + S_NOP 0, implicit %2.sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15_sub16_sub17_sub18_sub19 + +... diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll --- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll +++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll @@ -164,33 +164,33 @@ ; GCN-IR-NEXT: s_flbit_i32_b32 s8, s12 ; GCN-IR-NEXT: s_add_i32 s8, s8, 32 ; GCN-IR-NEXT: s_flbit_i32_b32 s9, s13 -; GCN-IR-NEXT: s_min_u32 s18, s8, s9 -; GCN-IR-NEXT: s_sub_u32 s16, s14, s18 +; GCN-IR-NEXT: s_min_u32 s20, s8, s9 +; GCN-IR-NEXT: s_sub_u32 s16, s14, s20 ; GCN-IR-NEXT: s_subb_u32 s17, 0, 0 -; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[20:21], s[16:17], 63 +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[18:19], s[16:17], 63 ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[22:23], s[16:17], 63 -; GCN-IR-NEXT: s_or_b64 s[20:21], s[10:11], s[20:21] -; GCN-IR-NEXT: s_and_b64 s[10:11], s[20:21], exec +; GCN-IR-NEXT: s_or_b64 s[18:19], s[10:11], s[18:19] +; GCN-IR-NEXT: s_and_b64 s[10:11], s[18:19], exec ; GCN-IR-NEXT: s_cselect_b32 s11, 0, s13 ; GCN-IR-NEXT: s_cselect_b32 s10, 0, s12 -; GCN-IR-NEXT: s_or_b64 s[20:21], s[20:21], s[22:23] +; GCN-IR-NEXT: s_or_b64 s[18:19], s[18:19], s[22:23] ; GCN-IR-NEXT: s_mov_b64 s[8:9], 0 -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[20:21] +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[18:19] ; GCN-IR-NEXT: s_cbranch_vccz .LBB0_5 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: s_add_u32 s20, s16, 1 -; GCN-IR-NEXT: s_addc_u32 s21, s17, 0 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[20:21], 0 +; GCN-IR-NEXT: s_add_u32 s18, s16, 1 +; GCN-IR-NEXT: s_addc_u32 s19, s17, 0 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[18:19], 0 ; GCN-IR-NEXT: s_sub_i32 s16, 63, s16 ; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11] ; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[12:13], s16 ; GCN-IR-NEXT: s_cbranch_vccz .LBB0_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: s_lshr_b64 s[16:17], s[12:13], s20 -; GCN-IR-NEXT: s_add_u32 s19, s6, -1 -; GCN-IR-NEXT: s_addc_u32 s20, s7, -1 +; GCN-IR-NEXT: s_lshr_b64 s[16:17], s[12:13], s18 +; GCN-IR-NEXT: s_add_u32 s18, s6, -1 +; GCN-IR-NEXT: s_addc_u32 s19, s7, -1 ; GCN-IR-NEXT: s_not_b64 s[8:9], s[14:15] -; GCN-IR-NEXT: s_add_u32 s12, s8, s18 +; GCN-IR-NEXT: s_add_u32 s12, s8, s20 ; GCN-IR-NEXT: s_addc_u32 s13, s9, 0 ; GCN-IR-NEXT: s_mov_b64 s[14:15], 0 ; GCN-IR-NEXT: s_mov_b32 s9, 0 @@ -201,8 +201,8 @@ ; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[10:11], 1 ; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[8:9] ; GCN-IR-NEXT: s_or_b64 s[10:11], s[14:15], s[10:11] -; GCN-IR-NEXT: 
s_sub_u32 s8, s19, s16 -; GCN-IR-NEXT: s_subb_u32 s8, s20, s17 +; GCN-IR-NEXT: s_sub_u32 s8, s18, s16 +; GCN-IR-NEXT: s_subb_u32 s8, s19, s17 ; GCN-IR-NEXT: s_ashr_i32 s14, s8, 31 ; GCN-IR-NEXT: s_mov_b32 s15, s14 ; GCN-IR-NEXT: s_and_b32 s8, s14, 1 @@ -211,9 +211,9 @@ ; GCN-IR-NEXT: s_subb_u32 s17, s17, s15 ; GCN-IR-NEXT: s_add_u32 s12, s12, 1 ; GCN-IR-NEXT: s_addc_u32 s13, s13, 0 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[22:23], s[12:13], 0 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[20:21], s[12:13], 0 ; GCN-IR-NEXT: s_mov_b64 s[14:15], s[8:9] -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[22:23] +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[20:21] ; GCN-IR-NEXT: s_cbranch_vccz .LBB0_3 ; GCN-IR-NEXT: .LBB0_4: ; %Flow6 ; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[10:11], 1 @@ -362,100 +362,100 @@ ; GCN-IR-LABEL: v_test_sdiv: ; GCN-IR: ; %bb.0: ; %_udiv-special-cases ; GCN-IR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-IR-NEXT: v_ashrrev_i32_e32 v4, 31, v1 -; GCN-IR-NEXT: v_xor_b32_e32 v0, v4, v0 -; GCN-IR-NEXT: v_ashrrev_i32_e32 v5, 31, v3 -; GCN-IR-NEXT: v_xor_b32_e32 v1, v4, v1 -; GCN-IR-NEXT: v_sub_i32_e32 v11, vcc, v0, v4 -; GCN-IR-NEXT: v_subb_u32_e32 v12, vcc, v1, v4, vcc -; GCN-IR-NEXT: v_xor_b32_e32 v1, v5, v2 -; GCN-IR-NEXT: v_xor_b32_e32 v0, v5, v3 -; GCN-IR-NEXT: v_sub_i32_e32 v2, vcc, v1, v5 -; GCN-IR-NEXT: v_subb_u32_e32 v3, vcc, v0, v5, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3] -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[11:12] -; GCN-IR-NEXT: v_ffbh_u32_e32 v0, v2 -; GCN-IR-NEXT: s_or_b64 s[6:7], vcc, s[4:5] -; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 32, v0 -; GCN-IR-NEXT: v_ffbh_u32_e32 v7, v3 -; GCN-IR-NEXT: v_min_u32_e32 v0, v0, v7 -; GCN-IR-NEXT: v_ffbh_u32_e32 v7, v11 -; GCN-IR-NEXT: v_add_i32_e32 v7, vcc, 32, v7 -; GCN-IR-NEXT: v_ffbh_u32_e32 v8, v12 -; GCN-IR-NEXT: v_min_u32_e32 v13, v7, v8 -; GCN-IR-NEXT: v_sub_i32_e32 v7, vcc, v0, v13 -; GCN-IR-NEXT: v_subb_u32_e64 v8, s[4:5], 0, 0, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[7:8] -; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[4:5], 63, v[7:8] -; GCN-IR-NEXT: s_or_b64 s[6:7], s[6:7], vcc -; GCN-IR-NEXT: s_xor_b64 s[8:9], s[6:7], -1 -; GCN-IR-NEXT: v_mov_b32_e32 v6, v4 -; GCN-IR-NEXT: v_mov_b32_e32 v1, v5 -; GCN-IR-NEXT: v_cndmask_b32_e64 v10, v12, 0, s[6:7] -; GCN-IR-NEXT: s_and_b64 s[4:5], s[8:9], s[4:5] -; GCN-IR-NEXT: v_cndmask_b32_e64 v9, v11, 0, s[6:7] +; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v1 +; GCN-IR-NEXT: v_xor_b32_e32 v0, v12, v0 +; GCN-IR-NEXT: v_ashrrev_i32_e32 v13, 31, v3 +; GCN-IR-NEXT: v_xor_b32_e32 v1, v12, v1 +; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v0, v12 +; GCN-IR-NEXT: v_subb_u32_e32 v7, vcc, v1, v12, vcc +; GCN-IR-NEXT: v_xor_b32_e32 v0, v13, v2 +; GCN-IR-NEXT: v_xor_b32_e32 v1, v13, v3 +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v13 +; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v13, vcc +; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0 +; GCN-IR-NEXT: v_add_i32_e64 v2, s[6:7], 32, v2 +; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1 +; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3 +; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v6 +; GCN-IR-NEXT: v_add_i32_e64 v2, s[6:7], 32, v2 +; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v7 +; GCN-IR-NEXT: v_min_u32_e32 v11, v2, v3 +; GCN-IR-NEXT: v_sub_i32_e64 v2, s[6:7], v10, v11 +; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1] +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[6:7] +; GCN-IR-NEXT: v_subb_u32_e64 v3, s[6:7], 0, 0, s[6:7] +; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[6:7], 63, v[2:3] +; GCN-IR-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7] +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[2:3] +; GCN-IR-NEXT: 
s_xor_b64 s[6:7], s[4:5], -1 +; GCN-IR-NEXT: v_mov_b32_e32 v14, v12 +; GCN-IR-NEXT: v_mov_b32_e32 v15, v13 +; GCN-IR-NEXT: v_cndmask_b32_e64 v5, v7, 0, s[4:5] +; GCN-IR-NEXT: v_cndmask_b32_e64 v4, v6, 0, s[4:5] +; GCN-IR-NEXT: s_and_b64 s[4:5], s[6:7], vcc ; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5] ; GCN-IR-NEXT: s_cbranch_execz .LBB1_6 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, 1, v7 -; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, 0, v8, vcc -; GCN-IR-NEXT: v_sub_i32_e64 v7, s[4:5], 63, v7 -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[14:15] -; GCN-IR-NEXT: v_lshl_b64 v[7:8], v[11:12], v7 -; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v10, 0 +; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v2 +; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v3, vcc +; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2 +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9] +; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[6:7], v2 +; GCN-IR-NEXT: v_mov_b32_e32 v4, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v5, 0 ; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc ; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5] ; GCN-IR-NEXT: s_cbranch_execz .LBB1_5 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: v_add_i32_e32 v18, vcc, -1, v2 -; GCN-IR-NEXT: v_addc_u32_e32 v19, vcc, -1, v3, vcc -; GCN-IR-NEXT: v_not_b32_e32 v0, v0 -; GCN-IR-NEXT: v_lshr_b64 v[14:15], v[11:12], v14 -; GCN-IR-NEXT: v_not_b32_e32 v9, 0 -; GCN-IR-NEXT: v_add_i32_e32 v11, vcc, v0, v13 -; GCN-IR-NEXT: v_mov_b32_e32 v16, 0 -; GCN-IR-NEXT: v_addc_u32_e32 v12, vcc, 0, v9, vcc -; GCN-IR-NEXT: s_mov_b64 s[10:11], 0 -; GCN-IR-NEXT: v_mov_b32_e32 v17, 0 +; GCN-IR-NEXT: v_add_i32_e32 v16, vcc, -1, v0 +; GCN-IR-NEXT: v_addc_u32_e32 v17, vcc, -1, v1, vcc +; GCN-IR-NEXT: v_not_b32_e32 v5, v10 +; GCN-IR-NEXT: v_lshr_b64 v[8:9], v[6:7], v8 +; GCN-IR-NEXT: v_not_b32_e32 v4, 0 +; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, v5, v11 ; GCN-IR-NEXT: v_mov_b32_e32 v10, 0 +; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v4, vcc +; GCN-IR-NEXT: s_mov_b64 s[10:11], 0 +; GCN-IR-NEXT: v_mov_b32_e32 v11, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v5, 0 ; GCN-IR-NEXT: .LBB1_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[14:15], v[14:15], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v0, 31, v8 -; GCN-IR-NEXT: v_or_b32_e32 v0, v14, v0 -; GCN-IR-NEXT: v_sub_i32_e32 v9, vcc, v18, v0 -; GCN-IR-NEXT: v_lshl_b64 v[7:8], v[7:8], 1 -; GCN-IR-NEXT: v_subb_u32_e32 v9, vcc, v19, v15, vcc -; GCN-IR-NEXT: v_ashrrev_i32_e32 v13, 31, v9 -; GCN-IR-NEXT: v_add_i32_e32 v11, vcc, 1, v11 -; GCN-IR-NEXT: v_or_b32_e32 v7, v16, v7 -; GCN-IR-NEXT: v_and_b32_e32 v9, 1, v13 -; GCN-IR-NEXT: v_and_b32_e32 v16, v13, v3 -; GCN-IR-NEXT: v_and_b32_e32 v13, v13, v2 -; GCN-IR-NEXT: v_addc_u32_e32 v12, vcc, 0, v12, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[11:12] -; GCN-IR-NEXT: v_sub_i32_e64 v14, s[4:5], v0, v13 -; GCN-IR-NEXT: v_or_b32_e32 v8, v17, v8 -; GCN-IR-NEXT: v_subb_u32_e64 v15, s[4:5], v15, v16, s[4:5] -; GCN-IR-NEXT: v_mov_b32_e32 v17, v10 +; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1 +; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3 +; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4 +; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1 +; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v16, v8 +; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v17, v9, vcc +; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2 +; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4 +; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6 +; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3 +; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10 +; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v1 +; 
GCN-IR-NEXT: v_and_b32_e32 v10, v10, v0 +; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc +; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7] +; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10 +; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5] +; GCN-IR-NEXT: v_mov_b32_e32 v11, v5 ; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11] -; GCN-IR-NEXT: v_mov_b32_e32 v16, v9 +; GCN-IR-NEXT: v_mov_b32_e32 v10, v4 ; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11] ; GCN-IR-NEXT: s_cbranch_execnz .LBB1_3 ; GCN-IR-NEXT: ; %bb.4: ; %Flow ; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11] ; GCN-IR-NEXT: .LBB1_5: ; %Flow3 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9] -; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[7:8], 1 -; GCN-IR-NEXT: v_or_b32_e32 v10, v10, v3 -; GCN-IR-NEXT: v_or_b32_e32 v9, v9, v2 +; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[2:3], 1 +; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v1 +; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v0 ; GCN-IR-NEXT: .LBB1_6: ; %Flow4 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[6:7] -; GCN-IR-NEXT: v_xor_b32_e32 v0, v5, v4 -; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v6 -; GCN-IR-NEXT: v_xor_b32_e32 v3, v9, v0 -; GCN-IR-NEXT: v_xor_b32_e32 v2, v10, v1 +; GCN-IR-NEXT: v_xor_b32_e32 v0, v13, v12 +; GCN-IR-NEXT: v_xor_b32_e32 v1, v15, v14 +; GCN-IR-NEXT: v_xor_b32_e32 v3, v4, v0 +; GCN-IR-NEXT: v_xor_b32_e32 v2, v5, v1 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v3, v0 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v2, v1, vcc ; GCN-IR-NEXT: s_setpc_b64 s[30:31] @@ -1001,33 +1001,33 @@ ; GCN-IR-NEXT: s_flbit_i32_b32 s8, s12 ; GCN-IR-NEXT: s_add_i32 s8, s8, 32 ; GCN-IR-NEXT: s_flbit_i32_b32 s9, s13 -; GCN-IR-NEXT: s_min_u32 s18, s8, s9 -; GCN-IR-NEXT: s_sub_u32 s16, s14, s18 +; GCN-IR-NEXT: s_min_u32 s20, s8, s9 +; GCN-IR-NEXT: s_sub_u32 s16, s14, s20 ; GCN-IR-NEXT: s_subb_u32 s17, 0, 0 -; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[20:21], s[16:17], 63 +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[18:19], s[16:17], 63 ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[22:23], s[16:17], 63 -; GCN-IR-NEXT: s_or_b64 s[20:21], s[10:11], s[20:21] -; GCN-IR-NEXT: s_and_b64 s[10:11], s[20:21], exec +; GCN-IR-NEXT: s_or_b64 s[18:19], s[10:11], s[18:19] +; GCN-IR-NEXT: s_and_b64 s[10:11], s[18:19], exec ; GCN-IR-NEXT: s_cselect_b32 s11, 0, s13 ; GCN-IR-NEXT: s_cselect_b32 s10, 0, s12 -; GCN-IR-NEXT: s_or_b64 s[20:21], s[20:21], s[22:23] +; GCN-IR-NEXT: s_or_b64 s[18:19], s[18:19], s[22:23] ; GCN-IR-NEXT: s_mov_b64 s[8:9], 0 -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[20:21] +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[18:19] ; GCN-IR-NEXT: s_cbranch_vccz .LBB9_5 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: s_add_u32 s20, s16, 1 -; GCN-IR-NEXT: s_addc_u32 s21, s17, 0 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[20:21], 0 +; GCN-IR-NEXT: s_add_u32 s18, s16, 1 +; GCN-IR-NEXT: s_addc_u32 s19, s17, 0 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[18:19], 0 ; GCN-IR-NEXT: s_sub_i32 s16, 63, s16 ; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11] ; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[12:13], s16 ; GCN-IR-NEXT: s_cbranch_vccz .LBB9_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: s_lshr_b64 s[16:17], s[12:13], s20 -; GCN-IR-NEXT: s_add_u32 s19, s6, -1 -; GCN-IR-NEXT: s_addc_u32 s20, s7, -1 +; GCN-IR-NEXT: s_lshr_b64 s[16:17], s[12:13], s18 +; GCN-IR-NEXT: s_add_u32 s18, s6, -1 +; GCN-IR-NEXT: s_addc_u32 s19, s7, -1 ; GCN-IR-NEXT: s_not_b64 s[8:9], s[14:15] -; GCN-IR-NEXT: s_add_u32 s12, s8, s18 +; GCN-IR-NEXT: s_add_u32 s12, s8, s20 ; GCN-IR-NEXT: s_addc_u32 s13, s9, 0 ; GCN-IR-NEXT: s_mov_b64 s[14:15], 0 ; GCN-IR-NEXT: s_mov_b32 s9, 0 @@ -1038,8 +1038,8 @@ ; GCN-IR-NEXT: s_lshl_b64 
s[10:11], s[10:11], 1 ; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[8:9] ; GCN-IR-NEXT: s_or_b64 s[10:11], s[14:15], s[10:11] -; GCN-IR-NEXT: s_sub_u32 s8, s19, s16 -; GCN-IR-NEXT: s_subb_u32 s8, s20, s17 +; GCN-IR-NEXT: s_sub_u32 s8, s18, s16 +; GCN-IR-NEXT: s_subb_u32 s8, s19, s17 ; GCN-IR-NEXT: s_ashr_i32 s14, s8, 31 ; GCN-IR-NEXT: s_mov_b32 s15, s14 ; GCN-IR-NEXT: s_and_b32 s8, s14, 1 @@ -1048,9 +1048,9 @@ ; GCN-IR-NEXT: s_subb_u32 s17, s17, s15 ; GCN-IR-NEXT: s_add_u32 s12, s12, 1 ; GCN-IR-NEXT: s_addc_u32 s13, s13, 0 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[22:23], s[12:13], 0 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[20:21], s[12:13], 0 ; GCN-IR-NEXT: s_mov_b64 s[14:15], s[8:9] -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[22:23] +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[20:21] ; GCN-IR-NEXT: s_cbranch_vccz .LBB9_3 ; GCN-IR-NEXT: .LBB9_4: ; %Flow3 ; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[10:11], 1 @@ -1206,32 +1206,32 @@ ; GCN-IR-NEXT: s_flbit_i32_b32 s10, s2 ; GCN-IR-NEXT: s_add_i32 s10, s10, 32 ; GCN-IR-NEXT: s_flbit_i32_b32 s11, s3 -; GCN-IR-NEXT: s_min_u32 s10, s10, s11 -; GCN-IR-NEXT: s_add_u32 s12, s10, 0xffffffc5 -; GCN-IR-NEXT: s_addc_u32 s13, 0, -1 +; GCN-IR-NEXT: s_min_u32 s14, s10, s11 +; GCN-IR-NEXT: s_add_u32 s10, s14, 0xffffffc5 +; GCN-IR-NEXT: s_addc_u32 s11, 0, -1 ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[2:3], 0 -; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[14:15], s[12:13], 63 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[12:13], 63 -; GCN-IR-NEXT: s_or_b64 s[14:15], s[8:9], s[14:15] -; GCN-IR-NEXT: s_and_b64 s[8:9], s[14:15], exec +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[12:13], s[10:11], 63 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[10:11], 63 +; GCN-IR-NEXT: s_or_b64 s[12:13], s[8:9], s[12:13] +; GCN-IR-NEXT: s_and_b64 s[8:9], s[12:13], exec ; GCN-IR-NEXT: s_cselect_b32 s8, 0, 24 -; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[16:17] -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[14:15] +; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[16:17] +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[12:13] ; GCN-IR-NEXT: s_mov_b32 s9, 0 ; GCN-IR-NEXT: s_cbranch_vccz .LBB10_5 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: s_add_u32 s14, s12, 1 -; GCN-IR-NEXT: s_addc_u32 s15, s13, 0 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[14:15], 0 -; GCN-IR-NEXT: s_sub_i32 s11, 63, s12 +; GCN-IR-NEXT: s_add_u32 s12, s10, 1 +; GCN-IR-NEXT: s_addc_u32 s13, s11, 0 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[12:13], 0 +; GCN-IR-NEXT: s_sub_i32 s10, 63, s10 ; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[8:9] -; GCN-IR-NEXT: s_lshl_b64 s[8:9], 24, s11 +; GCN-IR-NEXT: s_lshl_b64 s[8:9], 24, s10 ; GCN-IR-NEXT: s_cbranch_vccz .LBB10_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: s_lshr_b64 s[12:13], 24, s14 +; GCN-IR-NEXT: s_lshr_b64 s[12:13], 24, s12 ; GCN-IR-NEXT: s_add_u32 s16, s2, -1 ; GCN-IR-NEXT: s_addc_u32 s17, s3, -1 -; GCN-IR-NEXT: s_sub_u32 s10, 58, s10 +; GCN-IR-NEXT: s_sub_u32 s10, 58, s14 ; GCN-IR-NEXT: s_subb_u32 s11, 0, 0 ; GCN-IR-NEXT: s_mov_b64 s[14:15], 0 ; GCN-IR-NEXT: s_mov_b32 s7, 0 @@ -1385,87 +1385,87 @@ ; GCN-IR-LABEL: v_test_sdiv_k_num_i64: ; GCN-IR: ; %bb.0: ; %_udiv-special-cases ; GCN-IR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-IR-NEXT: v_ashrrev_i32_e32 v2, 31, v1 -; GCN-IR-NEXT: v_xor_b32_e32 v0, v2, v0 -; GCN-IR-NEXT: v_xor_b32_e32 v1, v2, v1 -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v2 -; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc -; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v0 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 32, v4 -; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v1 -; GCN-IR-NEXT: v_min_u32_e32 v8, v4, v5 +; 
GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v1 +; GCN-IR-NEXT: v_xor_b32_e32 v0, v12, v0 +; GCN-IR-NEXT: v_xor_b32_e32 v1, v12, v1 +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12 +; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v12, vcc +; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0 +; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2 +; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1 +; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3 ; GCN-IR-NEXT: s_movk_i32 s6, 0xffc5 -; GCN-IR-NEXT: v_add_i32_e32 v5, vcc, s6, v8 -; GCN-IR-NEXT: v_addc_u32_e64 v6, s[6:7], 0, -1, vcc +; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v10 +; GCN-IR-NEXT: v_addc_u32_e64 v3, s[6:7], 0, -1, vcc ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1] -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[5:6] -; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[6:7], 63, v[5:6] +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[2:3] +; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[6:7], 63, v[2:3] ; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], vcc -; GCN-IR-NEXT: v_cndmask_b32_e64 v7, 24, 0, s[4:5] +; GCN-IR-NEXT: v_cndmask_b32_e64 v4, 24, 0, s[4:5] ; GCN-IR-NEXT: s_xor_b64 s[4:5], s[4:5], -1 -; GCN-IR-NEXT: v_mov_b32_e32 v3, v2 -; GCN-IR-NEXT: v_mov_b32_e32 v4, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v13, v12 +; GCN-IR-NEXT: v_mov_b32_e32 v5, 0 ; GCN-IR-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7] ; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5] ; GCN-IR-NEXT: s_cbranch_execz .LBB11_6 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v9, vcc, 1, v5 -; GCN-IR-NEXT: v_addc_u32_e32 v10, vcc, 0, v6, vcc -; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 63, v5 -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[9:10] -; GCN-IR-NEXT: v_lshl_b64 v[4:5], 24, v4 -; GCN-IR-NEXT: v_mov_b32_e32 v6, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v7, 0 +; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2 +; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc +; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2 +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7] +; GCN-IR-NEXT: v_lshl_b64 v[2:3], 24, v2 +; GCN-IR-NEXT: v_mov_b32_e32 v4, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v5, 0 ; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc ; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5] ; GCN-IR-NEXT: s_cbranch_execz .LBB11_5 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader ; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, -1, v0 ; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, -1, v1, vcc -; GCN-IR-NEXT: v_lshr_b64 v[10:11], 24, v9 -; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, 58, v8 -; GCN-IR-NEXT: v_mov_b32_e32 v12, 0 -; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], 0, 0, vcc +; GCN-IR-NEXT: v_lshr_b64 v[8:9], 24, v6 +; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 58, v10 +; GCN-IR-NEXT: v_mov_b32_e32 v10, 0 +; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, vcc ; GCN-IR-NEXT: s_mov_b64 s[10:11], 0 -; GCN-IR-NEXT: v_mov_b32_e32 v13, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v7, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v11, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v5, 0 ; GCN-IR-NEXT: .LBB11_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[10:11], v[10:11], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v6, 31, v5 -; GCN-IR-NEXT: v_or_b32_e32 v10, v10, v6 -; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[4:5], 1 -; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v14, v10 -; GCN-IR-NEXT: v_subb_u32_e32 v6, vcc, v15, v11, vcc -; GCN-IR-NEXT: v_or_b32_e32 v4, v12, v4 -; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v6 -; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v8 -; GCN-IR-NEXT: v_or_b32_e32 v5, v13, v5 -; GCN-IR-NEXT: v_and_b32_e32 v6, 1, v12 -; GCN-IR-NEXT: v_and_b32_e32 v13, v12, v1 -; GCN-IR-NEXT: v_and_b32_e32 v12, v12, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc -; 
GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9] -; GCN-IR-NEXT: v_sub_i32_e64 v10, s[4:5], v10, v12 -; GCN-IR-NEXT: v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5] -; GCN-IR-NEXT: v_mov_b32_e32 v13, v7 +; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1 +; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3 +; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4 +; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1 +; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v14, v8 +; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v15, v9, vcc +; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2 +; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4 +; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6 +; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3 +; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10 +; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v1 +; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v0 +; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc +; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7] +; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10 +; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5] +; GCN-IR-NEXT: v_mov_b32_e32 v11, v5 ; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11] -; GCN-IR-NEXT: v_mov_b32_e32 v12, v6 +; GCN-IR-NEXT: v_mov_b32_e32 v10, v4 ; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11] ; GCN-IR-NEXT: s_cbranch_execnz .LBB11_3 ; GCN-IR-NEXT: ; %bb.4: ; %Flow ; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11] ; GCN-IR-NEXT: .LBB11_5: ; %Flow3 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9] -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[4:5], 1 -; GCN-IR-NEXT: v_or_b32_e32 v4, v7, v1 -; GCN-IR-NEXT: v_or_b32_e32 v7, v6, v0 +; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[2:3], 1 +; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v1 +; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v0 ; GCN-IR-NEXT: .LBB11_6: ; %Flow4 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[6:7] -; GCN-IR-NEXT: v_xor_b32_e32 v0, v7, v2 -; GCN-IR-NEXT: v_xor_b32_e32 v1, v4, v3 -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v2 -; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc +; GCN-IR-NEXT: v_xor_b32_e32 v0, v4, v12 +; GCN-IR-NEXT: v_xor_b32_e32 v1, v5, v13 +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12 +; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v13, vcc ; GCN-IR-NEXT: s_setpc_b64 s[30:31] %result = sdiv i64 24, %x ret i64 %result @@ -1578,39 +1578,39 @@ ; GCN-IR-LABEL: v_test_sdiv_pow2_k_num_i64: ; GCN-IR: ; %bb.0: ; %_udiv-special-cases ; GCN-IR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-IR-NEXT: v_ashrrev_i32_e32 v2, 31, v1 -; GCN-IR-NEXT: v_xor_b32_e32 v0, v2, v0 -; GCN-IR-NEXT: v_xor_b32_e32 v1, v2, v1 -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v2 -; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc -; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v0 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 32, v4 -; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v1 -; GCN-IR-NEXT: v_min_u32_e32 v8, v4, v5 +; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v1 +; GCN-IR-NEXT: v_xor_b32_e32 v0, v12, v0 +; GCN-IR-NEXT: v_xor_b32_e32 v1, v12, v1 +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12 +; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v12, vcc +; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0 +; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2 +; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1 +; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3 ; GCN-IR-NEXT: s_movk_i32 s6, 0xffd0 -; GCN-IR-NEXT: v_add_i32_e32 v5, vcc, s6, v8 -; GCN-IR-NEXT: v_addc_u32_e64 v6, s[6:7], 0, -1, vcc +; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v10 +; GCN-IR-NEXT: v_addc_u32_e64 v3, s[6:7], 0, -1, vcc ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1] -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[5:6] -; GCN-IR-NEXT: v_mov_b32_e32 v7, 0x8000 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[2:3] +; GCN-IR-NEXT: 
v_mov_b32_e32 v4, 0x8000 ; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], vcc -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[5:6] -; GCN-IR-NEXT: v_cndmask_b32_e64 v7, v7, 0, s[4:5] +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[2:3] +; GCN-IR-NEXT: v_cndmask_b32_e64 v4, v4, 0, s[4:5] ; GCN-IR-NEXT: s_xor_b64 s[4:5], s[4:5], -1 -; GCN-IR-NEXT: v_mov_b32_e32 v3, v2 -; GCN-IR-NEXT: v_mov_b32_e32 v4, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v13, v12 +; GCN-IR-NEXT: v_mov_b32_e32 v5, 0 ; GCN-IR-NEXT: s_mov_b64 s[8:9], 0x8000 ; GCN-IR-NEXT: s_and_b64 s[4:5], s[4:5], vcc ; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5] ; GCN-IR-NEXT: s_cbranch_execz .LBB12_6 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v9, vcc, 1, v5 -; GCN-IR-NEXT: v_addc_u32_e32 v10, vcc, 0, v6, vcc -; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 63, v5 -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[9:10] -; GCN-IR-NEXT: v_lshl_b64 v[4:5], s[8:9], v4 -; GCN-IR-NEXT: v_mov_b32_e32 v6, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v7, 0 +; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2 +; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc +; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2 +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7] +; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[8:9], v2 +; GCN-IR-NEXT: v_mov_b32_e32 v4, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v5, 0 ; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc ; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5] ; GCN-IR-NEXT: s_cbranch_execz .LBB12_5 @@ -1618,50 +1618,50 @@ ; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, -1, v0 ; GCN-IR-NEXT: s_mov_b64 s[4:5], 0x8000 ; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, -1, v1, vcc -; GCN-IR-NEXT: v_lshr_b64 v[10:11], s[4:5], v9 -; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, 47, v8 -; GCN-IR-NEXT: v_mov_b32_e32 v12, 0 -; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], 0, 0, vcc +; GCN-IR-NEXT: v_lshr_b64 v[8:9], s[4:5], v6 +; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 47, v10 +; GCN-IR-NEXT: v_mov_b32_e32 v10, 0 +; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, vcc ; GCN-IR-NEXT: s_mov_b64 s[10:11], 0 -; GCN-IR-NEXT: v_mov_b32_e32 v13, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v7, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v11, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v5, 0 ; GCN-IR-NEXT: .LBB12_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[10:11], v[10:11], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v6, 31, v5 -; GCN-IR-NEXT: v_or_b32_e32 v10, v10, v6 -; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[4:5], 1 -; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v14, v10 -; GCN-IR-NEXT: v_subb_u32_e32 v6, vcc, v15, v11, vcc -; GCN-IR-NEXT: v_or_b32_e32 v4, v12, v4 -; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v6 -; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v8 -; GCN-IR-NEXT: v_or_b32_e32 v5, v13, v5 -; GCN-IR-NEXT: v_and_b32_e32 v6, 1, v12 -; GCN-IR-NEXT: v_and_b32_e32 v13, v12, v1 -; GCN-IR-NEXT: v_and_b32_e32 v12, v12, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9] -; GCN-IR-NEXT: v_sub_i32_e64 v10, s[4:5], v10, v12 -; GCN-IR-NEXT: v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5] -; GCN-IR-NEXT: v_mov_b32_e32 v13, v7 +; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1 +; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3 +; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4 +; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1 +; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v14, v8 +; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v15, v9, vcc +; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2 +; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4 +; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6 +; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3 +; GCN-IR-NEXT: v_and_b32_e32 v4, 1, 
v10 +; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v1 +; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v0 +; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc +; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7] +; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10 +; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5] +; GCN-IR-NEXT: v_mov_b32_e32 v11, v5 ; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11] -; GCN-IR-NEXT: v_mov_b32_e32 v12, v6 +; GCN-IR-NEXT: v_mov_b32_e32 v10, v4 ; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11] ; GCN-IR-NEXT: s_cbranch_execnz .LBB12_3 ; GCN-IR-NEXT: ; %bb.4: ; %Flow ; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11] ; GCN-IR-NEXT: .LBB12_5: ; %Flow3 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9] -; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[4:5], 1 -; GCN-IR-NEXT: v_or_b32_e32 v4, v7, v1 -; GCN-IR-NEXT: v_or_b32_e32 v7, v6, v0 +; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[2:3], 1 +; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v1 +; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v0 ; GCN-IR-NEXT: .LBB12_6: ; %Flow4 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[6:7] -; GCN-IR-NEXT: v_xor_b32_e32 v0, v7, v2 -; GCN-IR-NEXT: v_xor_b32_e32 v1, v4, v3 -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v2 -; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc +; GCN-IR-NEXT: v_xor_b32_e32 v0, v4, v12 +; GCN-IR-NEXT: v_xor_b32_e32 v1, v5, v13 +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12 +; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v13, vcc ; GCN-IR-NEXT: s_setpc_b64 s[30:31] %result = sdiv i64 32768, %x ret i64 %result @@ -1681,84 +1681,84 @@ ; GCN-IR-LABEL: v_test_sdiv_pow2_k_den_i64: ; GCN-IR: ; %bb.0: ; %_udiv-special-cases ; GCN-IR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-IR-NEXT: v_ashrrev_i32_e32 v2, 31, v1 -; GCN-IR-NEXT: v_xor_b32_e32 v0, v2, v0 -; GCN-IR-NEXT: v_xor_b32_e32 v1, v2, v1 -; GCN-IR-NEXT: v_sub_i32_e32 v7, vcc, v0, v2 -; GCN-IR-NEXT: v_subb_u32_e32 v8, vcc, v1, v2, vcc -; GCN-IR-NEXT: v_ffbh_u32_e32 v0, v7 +; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v1 +; GCN-IR-NEXT: v_xor_b32_e32 v0, v10, v0 +; GCN-IR-NEXT: v_xor_b32_e32 v1, v10, v1 +; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v0, v10 +; GCN-IR-NEXT: v_subb_u32_e32 v5, vcc, v1, v10, vcc +; GCN-IR-NEXT: v_ffbh_u32_e32 v0, v4 ; GCN-IR-NEXT: v_add_i32_e64 v0, s[4:5], 32, v0 -; GCN-IR-NEXT: v_ffbh_u32_e32 v1, v8 -; GCN-IR-NEXT: v_min_u32_e32 v0, v0, v1 -; GCN-IR-NEXT: v_sub_i32_e64 v3, s[4:5], 48, v0 -; GCN-IR-NEXT: v_subb_u32_e64 v4, s[4:5], 0, 0, s[4:5] -; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[7:8] -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[3:4] -; GCN-IR-NEXT: v_mov_b32_e32 v1, v2 +; GCN-IR-NEXT: v_ffbh_u32_e32 v1, v5 +; GCN-IR-NEXT: v_min_u32_e32 v8, v0, v1 +; GCN-IR-NEXT: v_sub_i32_e64 v0, s[4:5], 48, v8 +; GCN-IR-NEXT: v_subb_u32_e64 v1, s[4:5], 0, 0, s[4:5] +; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5] +; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[0:1] +; GCN-IR-NEXT: v_mov_b32_e32 v11, v10 ; GCN-IR-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[3:4] +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[0:1] ; GCN-IR-NEXT: s_xor_b64 s[6:7], s[4:5], -1 -; GCN-IR-NEXT: v_cndmask_b32_e64 v6, v8, 0, s[4:5] -; GCN-IR-NEXT: v_cndmask_b32_e64 v5, v7, 0, s[4:5] +; GCN-IR-NEXT: v_cndmask_b32_e64 v3, v5, 0, s[4:5] +; GCN-IR-NEXT: v_cndmask_b32_e64 v2, v4, 0, s[4:5] ; GCN-IR-NEXT: s_and_b64 s[4:5], s[6:7], vcc ; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5] ; GCN-IR-NEXT: s_cbranch_execz .LBB13_6 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v9, vcc, 1, v3 -; GCN-IR-NEXT: v_addc_u32_e32 v10, vcc, 0, v4, vcc -; GCN-IR-NEXT: v_sub_i32_e64 
v3, s[4:5], 63, v3 -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[9:10] -; GCN-IR-NEXT: v_lshl_b64 v[3:4], v[7:8], v3 -; GCN-IR-NEXT: v_mov_b32_e32 v5, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v6, 0 +; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v0 +; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v1, vcc +; GCN-IR-NEXT: v_sub_i32_e64 v0, s[4:5], 63, v0 +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7] +; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[4:5], v0 +; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 ; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc ; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5] ; GCN-IR-NEXT: s_cbranch_execz .LBB13_5 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: v_lshr_b64 v[9:10], v[7:8], v9 -; GCN-IR-NEXT: v_add_i32_e32 v7, vcc, 0xffffffcf, v0 -; GCN-IR-NEXT: v_mov_b32_e32 v11, 0 -; GCN-IR-NEXT: v_addc_u32_e64 v8, s[4:5], 0, -1, vcc +; GCN-IR-NEXT: v_lshr_b64 v[6:7], v[4:5], v6 +; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 0xffffffcf, v8 +; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 +; GCN-IR-NEXT: v_addc_u32_e64 v5, s[4:5], 0, -1, vcc ; GCN-IR-NEXT: s_mov_b64 s[10:11], 0 -; GCN-IR-NEXT: v_mov_b32_e32 v12, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v6, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 ; GCN-IR-NEXT: s_movk_i32 s12, 0x7fff ; GCN-IR-NEXT: .LBB13_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[9:10], v[9:10], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v0, 31, v4 -; GCN-IR-NEXT: v_or_b32_e32 v0, v9, v0 -; GCN-IR-NEXT: v_sub_i32_e32 v5, vcc, s12, v0 -; GCN-IR-NEXT: v_subb_u32_e32 v5, vcc, 0, v10, vcc -; GCN-IR-NEXT: v_add_i32_e32 v7, vcc, 1, v7 -; GCN-IR-NEXT: v_lshl_b64 v[3:4], v[3:4], 1 -; GCN-IR-NEXT: v_ashrrev_i32_e32 v9, 31, v5 -; GCN-IR-NEXT: v_addc_u32_e32 v8, vcc, 0, v8, vcc -; GCN-IR-NEXT: v_and_b32_e32 v5, 1, v9 -; GCN-IR-NEXT: v_and_b32_e32 v9, 0x8000, v9 -; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[7:8] -; GCN-IR-NEXT: v_or_b32_e32 v4, v12, v4 -; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3 -; GCN-IR-NEXT: v_sub_i32_e64 v9, s[4:5], v0, v9 -; GCN-IR-NEXT: v_mov_b32_e32 v12, v6 -; GCN-IR-NEXT: v_subbrev_u32_e64 v10, s[4:5], 0, v10, s[4:5] +; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 +; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 +; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2 +; GCN-IR-NEXT: v_sub_i32_e32 v2, vcc, s12, v6 +; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 +; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, 0, v7, vcc +; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 1, v4 +; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0 +; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2 +; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc +; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v8 +; GCN-IR-NEXT: v_and_b32_e32 v8, 0x8000, v8 +; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5] +; GCN-IR-NEXT: v_or_b32_e32 v1, v9, v1 +; GCN-IR-NEXT: v_sub_i32_e64 v6, s[4:5], v6, v8 +; GCN-IR-NEXT: v_mov_b32_e32 v9, v3 +; GCN-IR-NEXT: v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5] ; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11] -; GCN-IR-NEXT: v_mov_b32_e32 v11, v5 +; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 ; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11] ; GCN-IR-NEXT: s_cbranch_execnz .LBB13_3 ; GCN-IR-NEXT: ; %bb.4: ; %Flow ; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11] ; GCN-IR-NEXT: .LBB13_5: ; %Flow3 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9] -; GCN-IR-NEXT: v_lshl_b64 v[3:4], v[3:4], 1 -; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4 -; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v3 +; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 +; GCN-IR-NEXT: v_or_b32_e32 v3, v3, v1 +; GCN-IR-NEXT: v_or_b32_e32 v2, v2, v0 ; GCN-IR-NEXT: 
.LBB13_6: ; %Flow4 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[6:7] -; GCN-IR-NEXT: v_xor_b32_e32 v0, v5, v2 -; GCN-IR-NEXT: v_xor_b32_e32 v3, v6, v1 -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v2 -; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v3, v1, vcc +; GCN-IR-NEXT: v_xor_b32_e32 v0, v2, v10 +; GCN-IR-NEXT: v_xor_b32_e32 v1, v3, v11 +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v10 +; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v11, vcc ; GCN-IR-NEXT: s_setpc_b64 s[30:31] %result = sdiv i64 %x, 32768 ret i64 %result diff --git a/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir b/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir --- a/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir +++ b/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir @@ -1,3 +1,4 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py # RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -amdgpu-dce-in-ra=0 -stress-regalloc=1 -start-before=simple-register-coalescing -stop-after=greedy,1 -o - %s | FileCheck %s # https://bugs.llvm.org/show_bug.cgi?id=33620 @@ -5,17 +6,8 @@ # This would assert due to the empty live interval created for %9 # on the last S_NOP with an undef subreg use. -# CHECK-LABEL: name: expecting_non_empty_interval -# CHECK: undef %5.sub1:vreg_64 = V_MOV_B32_e32 1786773504, implicit $exec -# CHECK-NEXT: dead %3:vgpr_32 = V_MUL_F32_e32 0, %5.sub1, implicit $mode, implicit $exec -# CHECK-NEXT: undef %7.sub1:vreg_64 = V_MAC_F32_e32 0, undef %1:vgpr_32, undef %7.sub1, implicit $mode, implicit $exec -# CHECK-NEXT: SI_SPILL_V64_SAVE %7, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5) -# CHECK: S_NOP 0, implicit %6.sub1 -# CHECK-NEXT: %8:vreg_64 = SI_SPILL_V64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5) -# CHECK-NEXT: S_NOP 0, implicit %8.sub1 -# CHECK-NEXT: S_NOP 0, implicit undef %9.sub0 name: expecting_non_empty_interval tracksRegLiveness: true @@ -23,6 +15,19 @@ scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3 stackPtrOffsetReg: $sgpr32 body: | + ; CHECK-LABEL: name: expecting_non_empty_interval + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1786773504, implicit $exec + ; CHECK-NEXT: dead [[V_MUL_F32_e32_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e32 0, [[V_MOV_B32_e32_]], implicit $mode, implicit $exec + ; CHECK-NEXT: undef %4.sub1:vreg_64 = V_MAC_F32_e32 0, undef %1:vgpr_32, undef %4.sub1, implicit $mode, implicit $exec + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: S_NOP 0, implicit %4.sub1 + ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1786773504, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]] + ; CHECK-NEXT: S_NOP 0, implicit undef %4.sub0 bb.0: successors: %bb.1 @@ -40,22 +45,25 @@ # Similar assert which happens when trying to rematerialize. 
# https://bugs.llvm.org/show_bug.cgi?id=33884 --- -# CHECK-LABEL: name: rematerialize_empty_interval_has_reference -# CHECK-NOT: MOV -# CHECK: undef %1.sub2:vreg_128 = V_MOV_B32_e32 1786773504, implicit $exec -# CHECK: bb.1: -# CHECK-NEXT: S_NOP 0, implicit %1.sub2 -# CHECK-NEXT: undef %2.sub2:vreg_128 = V_MOV_B32_e32 0, implicit $exec -# CHECK-NEXT: S_NOP 0, implicit %2.sub2 -# CHECK-NEXT: S_NOP 0, implicit undef %4.sub0 name: rematerialize_empty_interval_has_reference tracksRegLiveness: true machineFunctionInfo: scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3 stackPtrOffsetReg: $sgpr32 body: | + ; CHECK-LABEL: name: rematerialize_empty_interval_has_reference + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1786773504, implicit $exec + ; CHECK-NEXT: undef %2.sub2:vreg_96 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: S_NOP 0, implicit %2.sub2 + ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_]] + ; CHECK-NEXT: S_NOP 0, implicit undef %2.sub0 bb.0: successors: %bb.1 diff --git a/llvm/test/CodeGen/AMDGPU/spill-vgpr.ll b/llvm/test/CodeGen/AMDGPU/spill-vgpr.ll --- a/llvm/test/CodeGen/AMDGPU/spill-vgpr.ll +++ b/llvm/test/CodeGen/AMDGPU/spill-vgpr.ll @@ -137,7 +137,7 @@ ; GFX900: NumVgprs: 256 ; GFX908: NumVgprs: 252 -; GFX900: ScratchSize: 1668 +; GFX900: ScratchSize: 132 ; GFX908: ScratchSize: 0 ; GFX900: VGPRBlocks: 63 ; GFX908: VGPRBlocks: 62 diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll --- a/llvm/test/CodeGen/AMDGPU/srem64.ll +++ b/llvm/test/CodeGen/AMDGPU/srem64.ll @@ -135,34 +135,34 @@ ; GCN-IR-NEXT: s_add_i32 s6, s6, 32 ; GCN-IR-NEXT: s_flbit_i32_b32 s7, s3 ; GCN-IR-NEXT: s_min_u32 s10, s10, s11 -; GCN-IR-NEXT: s_min_u32 s14, s6, s7 -; GCN-IR-NEXT: s_sub_u32 s12, s10, s14 +; GCN-IR-NEXT: s_min_u32 s18, s6, s7 +; GCN-IR-NEXT: s_sub_u32 s12, s10, s18 ; GCN-IR-NEXT: s_subb_u32 s13, 0, 0 -; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[16:17], s[12:13], 63 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[12:13], 63 -; GCN-IR-NEXT: s_or_b64 s[16:17], s[8:9], s[16:17] -; GCN-IR-NEXT: s_and_b64 s[8:9], s[16:17], exec +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[14:15], s[12:13], 63 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[12:13], 63 +; GCN-IR-NEXT: s_or_b64 s[14:15], s[8:9], s[14:15] +; GCN-IR-NEXT: s_and_b64 s[8:9], s[14:15], exec ; GCN-IR-NEXT: s_cselect_b32 s9, 0, s3 ; GCN-IR-NEXT: s_cselect_b32 s8, 0, s2 -; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[18:19] +; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[16:17] ; GCN-IR-NEXT: s_mov_b64 s[6:7], 0 -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[16:17] +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[14:15] ; GCN-IR-NEXT: s_mov_b32 s11, 0 ; GCN-IR-NEXT: s_cbranch_vccz .LBB0_5 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: s_add_u32 s16, s12, 1 -; GCN-IR-NEXT: s_addc_u32 s17, s13, 0 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[16:17], 0 +; GCN-IR-NEXT: s_add_u32 s14, s12, 1 +; GCN-IR-NEXT: s_addc_u32 s15, s13, 0 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[14:15], 0 ; GCN-IR-NEXT: s_sub_i32 s12, 63, s12 ; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[8:9] ; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[2:3], s12 ; GCN-IR-NEXT: s_cbranch_vccz .LBB0_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: s_lshr_b64 s[12:13], s[2:3], s16 +; GCN-IR-NEXT: s_lshr_b64 s[12:13], s[2:3], s14 ; GCN-IR-NEXT: s_add_u32 s16, s4, -1 ; GCN-IR-NEXT: s_addc_u32 s17, s5, -1 ; GCN-IR-NEXT: s_not_b64 s[6:7], s[10:11] -; GCN-IR-NEXT: 
s_add_u32 s10, s6, s14 +; GCN-IR-NEXT: s_add_u32 s10, s6, s18 ; GCN-IR-NEXT: s_addc_u32 s11, s7, 0 ; GCN-IR-NEXT: s_mov_b64 s[14:15], 0 ; GCN-IR-NEXT: s_mov_b32 s7, 0 @@ -339,107 +339,107 @@ ; GCN-IR-LABEL: v_test_srem: ; GCN-IR: ; %bb.0: ; %_udiv-special-cases ; GCN-IR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-IR-NEXT: v_ashrrev_i32_e32 v4, 31, v1 -; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v4 -; GCN-IR-NEXT: v_ashrrev_i32_e32 v6, 31, v3 -; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v4 -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v4 -; GCN-IR-NEXT: v_xor_b32_e32 v2, v2, v6 -; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v4, vcc -; GCN-IR-NEXT: v_xor_b32_e32 v3, v3, v6 -; GCN-IR-NEXT: v_sub_i32_e32 v2, vcc, v2, v6 -; GCN-IR-NEXT: v_subb_u32_e32 v3, vcc, v3, v6, vcc -; GCN-IR-NEXT: v_ffbh_u32_e32 v6, v2 -; GCN-IR-NEXT: v_add_i32_e64 v6, s[6:7], 32, v6 -; GCN-IR-NEXT: v_ffbh_u32_e32 v7, v3 -; GCN-IR-NEXT: v_min_u32_e32 v10, v6, v7 -; GCN-IR-NEXT: v_ffbh_u32_e32 v6, v0 -; GCN-IR-NEXT: v_add_i32_e64 v6, s[6:7], 32, v6 -; GCN-IR-NEXT: v_ffbh_u32_e32 v7, v1 -; GCN-IR-NEXT: v_min_u32_e32 v11, v6, v7 -; GCN-IR-NEXT: v_sub_i32_e64 v7, s[6:7], v10, v11 +; GCN-IR-NEXT: v_ashrrev_i32_e32 v14, 31, v1 +; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v14 +; GCN-IR-NEXT: v_ashrrev_i32_e32 v4, 31, v3 +; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v14 +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v14 +; GCN-IR-NEXT: v_xor_b32_e32 v2, v2, v4 +; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v14, vcc +; GCN-IR-NEXT: v_xor_b32_e32 v3, v3, v4 +; GCN-IR-NEXT: v_sub_i32_e32 v2, vcc, v2, v4 +; GCN-IR-NEXT: v_subb_u32_e32 v3, vcc, v3, v4, vcc +; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v2 +; GCN-IR-NEXT: v_add_i32_e64 v4, s[6:7], 32, v4 +; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v3 +; GCN-IR-NEXT: v_min_u32_e32 v12, v4, v5 +; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v0 +; GCN-IR-NEXT: v_add_i32_e64 v4, s[6:7], 32, v4 +; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v1 +; GCN-IR-NEXT: v_min_u32_e32 v13, v4, v5 +; GCN-IR-NEXT: v_sub_i32_e64 v4, s[6:7], v12, v13 ; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3] ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1] -; GCN-IR-NEXT: v_subb_u32_e64 v8, s[6:7], 0, 0, s[6:7] -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[6:7], 63, v[7:8] +; GCN-IR-NEXT: v_subb_u32_e64 v5, s[6:7], 0, 0, s[6:7] +; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[6:7], 63, v[4:5] ; GCN-IR-NEXT: s_or_b64 s[4:5], vcc, s[4:5] ; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7] -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[7:8] +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[4:5] ; GCN-IR-NEXT: s_xor_b64 s[6:7], s[4:5], -1 -; GCN-IR-NEXT: v_mov_b32_e32 v5, v4 -; GCN-IR-NEXT: v_cndmask_b32_e64 v9, v1, 0, s[4:5] +; GCN-IR-NEXT: v_mov_b32_e32 v15, v14 +; GCN-IR-NEXT: v_cndmask_b32_e64 v7, v1, 0, s[4:5] ; GCN-IR-NEXT: v_cndmask_b32_e64 v6, v0, 0, s[4:5] ; GCN-IR-NEXT: s_and_b64 s[4:5], s[6:7], vcc ; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5] ; GCN-IR-NEXT: s_cbranch_execz .LBB1_6 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, 1, v7 -; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, 0, v8, vcc -; GCN-IR-NEXT: v_sub_i32_e64 v6, s[4:5], 63, v7 -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[12:13] -; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[0:1], v6 -; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 +; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4 +; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc +; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 63, v4 +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9] +; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[0:1], v4 +; GCN-IR-NEXT: v_mov_b32_e32 v6, 0 +; GCN-IR-NEXT: 
v_mov_b32_e32 v7, 0 ; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc ; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5] ; GCN-IR-NEXT: s_cbranch_execz .LBB1_5 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader ; GCN-IR-NEXT: v_add_i32_e32 v16, vcc, -1, v2 ; GCN-IR-NEXT: v_addc_u32_e32 v17, vcc, -1, v3, vcc -; GCN-IR-NEXT: v_not_b32_e32 v9, v10 -; GCN-IR-NEXT: v_lshr_b64 v[12:13], v[0:1], v12 -; GCN-IR-NEXT: v_not_b32_e32 v8, 0 -; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, v9, v11 -; GCN-IR-NEXT: v_mov_b32_e32 v14, 0 -; GCN-IR-NEXT: v_addc_u32_e32 v11, vcc, 0, v8, vcc +; GCN-IR-NEXT: v_not_b32_e32 v7, v12 +; GCN-IR-NEXT: v_lshr_b64 v[10:11], v[0:1], v8 +; GCN-IR-NEXT: v_not_b32_e32 v6, 0 +; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, v7, v13 +; GCN-IR-NEXT: v_mov_b32_e32 v12, 0 +; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v6, vcc ; GCN-IR-NEXT: s_mov_b64 s[10:11], 0 -; GCN-IR-NEXT: v_mov_b32_e32 v15, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v13, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v7, 0 ; GCN-IR-NEXT: .LBB1_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[12:13], v[12:13], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v8, 31, v7 -; GCN-IR-NEXT: v_or_b32_e32 v12, v12, v8 -; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 -; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, v16, v12 -; GCN-IR-NEXT: v_subb_u32_e32 v8, vcc, v17, v13, vcc -; GCN-IR-NEXT: v_or_b32_e32 v6, v14, v6 -; GCN-IR-NEXT: v_ashrrev_i32_e32 v14, 31, v8 -; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, 1, v10 -; GCN-IR-NEXT: v_or_b32_e32 v7, v15, v7 -; GCN-IR-NEXT: v_and_b32_e32 v8, 1, v14 -; GCN-IR-NEXT: v_and_b32_e32 v15, v14, v3 -; GCN-IR-NEXT: v_and_b32_e32 v14, v14, v2 -; GCN-IR-NEXT: v_addc_u32_e32 v11, vcc, 0, v11, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11] -; GCN-IR-NEXT: v_sub_i32_e64 v12, s[4:5], v12, v14 -; GCN-IR-NEXT: v_subb_u32_e64 v13, s[4:5], v13, v15, s[4:5] -; GCN-IR-NEXT: v_mov_b32_e32 v15, v9 +; GCN-IR-NEXT: v_lshl_b64 v[10:11], v[10:11], 1 +; GCN-IR-NEXT: v_lshrrev_b32_e32 v6, 31, v5 +; GCN-IR-NEXT: v_or_b32_e32 v10, v10, v6 +; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[4:5], 1 +; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v16, v10 +; GCN-IR-NEXT: v_subb_u32_e32 v6, vcc, v17, v11, vcc +; GCN-IR-NEXT: v_or_b32_e32 v4, v12, v4 +; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v6 +; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v8 +; GCN-IR-NEXT: v_or_b32_e32 v5, v13, v5 +; GCN-IR-NEXT: v_and_b32_e32 v6, 1, v12 +; GCN-IR-NEXT: v_and_b32_e32 v13, v12, v3 +; GCN-IR-NEXT: v_and_b32_e32 v12, v12, v2 +; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc +; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9] +; GCN-IR-NEXT: v_sub_i32_e64 v10, s[4:5], v10, v12 +; GCN-IR-NEXT: v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5] +; GCN-IR-NEXT: v_mov_b32_e32 v13, v7 ; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11] -; GCN-IR-NEXT: v_mov_b32_e32 v14, v8 +; GCN-IR-NEXT: v_mov_b32_e32 v12, v6 ; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11] ; GCN-IR-NEXT: s_cbranch_execnz .LBB1_3 ; GCN-IR-NEXT: ; %bb.4: ; %Flow ; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11] ; GCN-IR-NEXT: .LBB1_5: ; %Flow3 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9] -; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 -; GCN-IR-NEXT: v_or_b32_e32 v9, v9, v7 -; GCN-IR-NEXT: v_or_b32_e32 v6, v8, v6 +; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[4:5], 1 +; GCN-IR-NEXT: v_or_b32_e32 v7, v7, v5 +; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4 ; GCN-IR-NEXT: .LBB1_6: ; %Flow4 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[6:7] -; GCN-IR-NEXT: v_mul_lo_u32 v7, v2, v9 -; GCN-IR-NEXT: v_mul_hi_u32 v8, v2, v6 +; GCN-IR-NEXT: v_mul_lo_u32 
v4, v2, v7 +; GCN-IR-NEXT: v_mul_hi_u32 v5, v2, v6 ; GCN-IR-NEXT: v_mul_lo_u32 v3, v3, v6 ; GCN-IR-NEXT: v_mul_lo_u32 v2, v2, v6 -; GCN-IR-NEXT: v_add_i32_e32 v7, vcc, v8, v7 -; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, v7, v3 +; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, v5, v4 +; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, v4, v3 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v2 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc -; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v4 -; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v5 -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v4 -; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v5, vcc +; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v14 +; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v15 +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v14 +; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v15, vcc ; GCN-IR-NEXT: s_setpc_b64 s[30:31] %result = srem i64 %x, %y ret i64 %result @@ -1037,33 +1037,33 @@ ; GCN-IR-NEXT: s_flbit_i32_b32 s6, s2 ; GCN-IR-NEXT: s_add_i32 s6, s6, 32 ; GCN-IR-NEXT: s_flbit_i32_b32 s7, s3 -; GCN-IR-NEXT: s_min_u32 s16, s6, s7 -; GCN-IR-NEXT: s_sub_u32 s14, s12, s16 +; GCN-IR-NEXT: s_min_u32 s20, s6, s7 +; GCN-IR-NEXT: s_sub_u32 s14, s12, s20 ; GCN-IR-NEXT: s_subb_u32 s15, 0, 0 -; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[18:19], s[14:15], 63 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[20:21], s[14:15], 63 -; GCN-IR-NEXT: s_or_b64 s[18:19], s[10:11], s[18:19] -; GCN-IR-NEXT: s_and_b64 s[10:11], s[18:19], exec +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[16:17], s[14:15], 63 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[14:15], 63 +; GCN-IR-NEXT: s_or_b64 s[16:17], s[10:11], s[16:17] +; GCN-IR-NEXT: s_and_b64 s[10:11], s[16:17], exec ; GCN-IR-NEXT: s_cselect_b32 s11, 0, s3 ; GCN-IR-NEXT: s_cselect_b32 s10, 0, s2 -; GCN-IR-NEXT: s_or_b64 s[18:19], s[18:19], s[20:21] +; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[18:19] ; GCN-IR-NEXT: s_mov_b64 s[6:7], 0 -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[18:19] +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[16:17] ; GCN-IR-NEXT: s_cbranch_vccz .LBB8_5 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: s_add_u32 s18, s14, 1 -; GCN-IR-NEXT: s_addc_u32 s19, s15, 0 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[18:19], 0 +; GCN-IR-NEXT: s_add_u32 s16, s14, 1 +; GCN-IR-NEXT: s_addc_u32 s17, s15, 0 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[16:17], 0 ; GCN-IR-NEXT: s_sub_i32 s14, 63, s14 ; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11] ; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[2:3], s14 ; GCN-IR-NEXT: s_cbranch_vccz .LBB8_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: s_lshr_b64 s[14:15], s[2:3], s18 +; GCN-IR-NEXT: s_lshr_b64 s[14:15], s[2:3], s16 ; GCN-IR-NEXT: s_add_u32 s18, s8, -1 ; GCN-IR-NEXT: s_addc_u32 s19, s9, -1 ; GCN-IR-NEXT: s_not_b64 s[6:7], s[12:13] -; GCN-IR-NEXT: s_add_u32 s12, s6, s16 +; GCN-IR-NEXT: s_add_u32 s12, s6, s20 ; GCN-IR-NEXT: s_addc_u32 s13, s7, 0 ; GCN-IR-NEXT: s_mov_b64 s[16:17], 0 ; GCN-IR-NEXT: s_mov_b32 s7, 0 @@ -1188,33 +1188,33 @@ ; GCN-IR-NEXT: s_flbit_i32_b32 s8, s4 ; GCN-IR-NEXT: s_add_i32 s8, s8, 32 ; GCN-IR-NEXT: s_flbit_i32_b32 s9, s5 -; GCN-IR-NEXT: s_min_u32 s16, s8, s9 -; GCN-IR-NEXT: s_sub_u32 s14, s12, s16 +; GCN-IR-NEXT: s_min_u32 s20, s8, s9 +; GCN-IR-NEXT: s_sub_u32 s14, s12, s20 ; GCN-IR-NEXT: s_subb_u32 s15, 0, 0 -; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[18:19], s[14:15], 63 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[20:21], s[14:15], 63 -; GCN-IR-NEXT: s_or_b64 s[18:19], s[10:11], s[18:19] -; GCN-IR-NEXT: s_and_b64 s[10:11], s[18:19], exec +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[16:17], s[14:15], 63 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[14:15], 63 +; GCN-IR-NEXT: 
s_or_b64 s[16:17], s[10:11], s[16:17] +; GCN-IR-NEXT: s_and_b64 s[10:11], s[16:17], exec ; GCN-IR-NEXT: s_cselect_b32 s11, 0, s5 ; GCN-IR-NEXT: s_cselect_b32 s10, 0, s4 -; GCN-IR-NEXT: s_or_b64 s[18:19], s[18:19], s[20:21] +; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[18:19] ; GCN-IR-NEXT: s_mov_b64 s[8:9], 0 -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[18:19] +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[16:17] ; GCN-IR-NEXT: s_cbranch_vccz .LBB9_5 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: s_add_u32 s18, s14, 1 -; GCN-IR-NEXT: s_addc_u32 s19, s15, 0 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[18:19], 0 +; GCN-IR-NEXT: s_add_u32 s16, s14, 1 +; GCN-IR-NEXT: s_addc_u32 s17, s15, 0 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[16:17], 0 ; GCN-IR-NEXT: s_sub_i32 s14, 63, s14 ; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11] ; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[4:5], s14 ; GCN-IR-NEXT: s_cbranch_vccz .LBB9_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: s_lshr_b64 s[14:15], s[4:5], s18 +; GCN-IR-NEXT: s_lshr_b64 s[14:15], s[4:5], s16 ; GCN-IR-NEXT: s_add_u32 s18, s6, -1 ; GCN-IR-NEXT: s_addc_u32 s19, s7, -1 ; GCN-IR-NEXT: s_not_b64 s[8:9], s[12:13] -; GCN-IR-NEXT: s_add_u32 s12, s8, s16 +; GCN-IR-NEXT: s_add_u32 s12, s8, s20 ; GCN-IR-NEXT: s_addc_u32 s13, s9, 0 ; GCN-IR-NEXT: s_mov_b64 s[16:17], 0 ; GCN-IR-NEXT: s_mov_b32 s9, 0 @@ -1396,32 +1396,32 @@ ; GCN-IR-NEXT: s_flbit_i32_b32 s2, s4 ; GCN-IR-NEXT: s_add_i32 s2, s2, 32 ; GCN-IR-NEXT: s_flbit_i32_b32 s3, s5 -; GCN-IR-NEXT: s_min_u32 s8, s2, s3 -; GCN-IR-NEXT: s_add_u32 s2, s8, 0xffffffc5 +; GCN-IR-NEXT: s_min_u32 s12, s2, s3 +; GCN-IR-NEXT: s_add_u32 s2, s12, 0xffffffc5 ; GCN-IR-NEXT: s_addc_u32 s3, 0, -1 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[4:5], 0 -; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[12:13], s[2:3], 63 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[4:5], 0 +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[10:11], s[2:3], 63 ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[2:3], 63 -; GCN-IR-NEXT: s_or_b64 s[12:13], s[10:11], s[12:13] -; GCN-IR-NEXT: s_and_b64 s[10:11], s[12:13], exec -; GCN-IR-NEXT: s_cselect_b32 s10, 0, 24 -; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[14:15] -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[12:13] -; GCN-IR-NEXT: s_mov_b32 s11, 0 +; GCN-IR-NEXT: s_or_b64 s[10:11], s[8:9], s[10:11] +; GCN-IR-NEXT: s_and_b64 s[8:9], s[10:11], exec +; GCN-IR-NEXT: s_cselect_b32 s8, 0, 24 +; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[14:15] +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11] +; GCN-IR-NEXT: s_mov_b32 s9, 0 ; GCN-IR-NEXT: s_cbranch_vccz .LBB10_5 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: s_add_u32 s10, s2, 1 -; GCN-IR-NEXT: s_addc_u32 s11, s3, 0 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[10:11], 0 +; GCN-IR-NEXT: s_add_u32 s8, s2, 1 +; GCN-IR-NEXT: s_addc_u32 s9, s3, 0 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[8:9], 0 ; GCN-IR-NEXT: s_sub_i32 s2, 63, s2 -; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[12:13] +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11] ; GCN-IR-NEXT: s_lshl_b64 s[2:3], 24, s2 ; GCN-IR-NEXT: s_cbranch_vccz .LBB10_4 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: s_lshr_b64 s[10:11], 24, s10 +; GCN-IR-NEXT: s_lshr_b64 s[10:11], 24, s8 ; GCN-IR-NEXT: s_add_u32 s14, s4, -1 ; GCN-IR-NEXT: s_addc_u32 s15, s5, -1 -; GCN-IR-NEXT: s_sub_u32 s8, 58, s8 +; GCN-IR-NEXT: s_sub_u32 s8, 58, s12 ; GCN-IR-NEXT: s_subb_u32 s9, 0, 0 ; GCN-IR-NEXT: s_mov_b64 s[12:13], 0 ; GCN-IR-NEXT: s_mov_b32 s7, 0 @@ -1448,13 +1448,13 @@ ; GCN-IR-NEXT: s_cbranch_vccz .LBB10_3 ; GCN-IR-NEXT: .LBB10_4: ; %Flow5 ; GCN-IR-NEXT: 
s_lshl_b64 s[2:3], s[2:3], 1 -; GCN-IR-NEXT: s_or_b64 s[10:11], s[6:7], s[2:3] +; GCN-IR-NEXT: s_or_b64 s[8:9], s[6:7], s[2:3] ; GCN-IR-NEXT: .LBB10_5: ; %udiv-end -; GCN-IR-NEXT: v_mov_b32_e32 v0, s10 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s8 ; GCN-IR-NEXT: v_mul_hi_u32 v0, s4, v0 -; GCN-IR-NEXT: s_mul_i32 s6, s4, s11 -; GCN-IR-NEXT: s_mul_i32 s5, s5, s10 -; GCN-IR-NEXT: s_mul_i32 s4, s4, s10 +; GCN-IR-NEXT: s_mul_i32 s6, s4, s9 +; GCN-IR-NEXT: s_mul_i32 s5, s5, s8 +; GCN-IR-NEXT: s_mul_i32 s4, s4, s8 ; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, s6, v0 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, s5, v0 ; GCN-IR-NEXT: v_sub_i32_e64 v0, vcc, 24, s4 @@ -1582,25 +1582,25 @@ ; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0 ; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2 ; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1 -; GCN-IR-NEXT: v_min_u32_e32 v6, v2, v3 +; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3 ; GCN-IR-NEXT: s_movk_i32 s6, 0xffc5 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, s6, v6 -; GCN-IR-NEXT: v_addc_u32_e64 v5, s[6:7], 0, -1, vcc +; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v10 +; GCN-IR-NEXT: v_addc_u32_e64 v3, s[6:7], 0, -1, vcc ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1] -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[4:5] -; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[6:7], 63, v[4:5] +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[2:3] +; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[6:7], 63, v[2:3] ; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], vcc -; GCN-IR-NEXT: v_cndmask_b32_e64 v3, 24, 0, s[4:5] +; GCN-IR-NEXT: v_cndmask_b32_e64 v4, 24, 0, s[4:5] ; GCN-IR-NEXT: s_xor_b64 s[4:5], s[4:5], -1 -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v5, 0 ; GCN-IR-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7] ; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5] ; GCN-IR-NEXT: s_cbranch_execz .LBB11_6 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v7, vcc, 1, v4 -; GCN-IR-NEXT: v_addc_u32_e32 v8, vcc, 0, v5, vcc -; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v4 -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[7:8] +; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2 +; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc +; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2 +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7] ; GCN-IR-NEXT: v_lshl_b64 v[2:3], 24, v2 ; GCN-IR-NEXT: v_mov_b32_e32 v4, 0 ; GCN-IR-NEXT: v_mov_b32_e32 v5, 0 @@ -1610,8 +1610,8 @@ ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader ; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v0 ; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v1, vcc -; GCN-IR-NEXT: v_lshr_b64 v[8:9], 24, v7 -; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 58, v6 +; GCN-IR-NEXT: v_lshr_b64 v[8:9], 24, v6 +; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 58, v10 ; GCN-IR-NEXT: v_mov_b32_e32 v10, 0 ; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, vcc ; GCN-IR-NEXT: s_mov_b64 s[10:11], 0 @@ -1645,16 +1645,16 @@ ; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11] ; GCN-IR-NEXT: .LBB11_5: ; %Flow3 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9] -; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[2:3], 1 -; GCN-IR-NEXT: v_or_b32_e32 v2, v5, v7 -; GCN-IR-NEXT: v_or_b32_e32 v3, v4, v6 +; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1 +; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v3 +; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v2 ; GCN-IR-NEXT: .LBB11_6: ; %Flow4 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[6:7] -; GCN-IR-NEXT: v_mul_lo_u32 v2, v0, v2 -; GCN-IR-NEXT: v_mul_hi_u32 v4, v0, v3 -; GCN-IR-NEXT: v_mul_lo_u32 v1, v1, v3 -; GCN-IR-NEXT: v_mul_lo_u32 v0, v0, v3 -; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, v4, v2 +; GCN-IR-NEXT: v_mul_lo_u32 v2, v0, v5 +; GCN-IR-NEXT: v_mul_hi_u32 v3, v0, v4 +; GCN-IR-NEXT: v_mul_lo_u32 v1, v1, v4 +; 
GCN-IR-NEXT: v_mul_lo_u32 v0, v0, v4 +; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, v3, v2 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 24, v0 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, 0, v1, vcc @@ -1773,27 +1773,27 @@ ; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0 ; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2 ; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1 -; GCN-IR-NEXT: v_min_u32_e32 v6, v2, v3 +; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3 ; GCN-IR-NEXT: s_movk_i32 s6, 0xffd0 -; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, s6, v6 -; GCN-IR-NEXT: v_addc_u32_e64 v4, s[6:7], 0, -1, vcc +; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v10 +; GCN-IR-NEXT: v_addc_u32_e64 v3, s[6:7], 0, -1, vcc ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1] -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[3:4] -; GCN-IR-NEXT: v_mov_b32_e32 v5, 0x8000 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[2:3] +; GCN-IR-NEXT: v_mov_b32_e32 v4, 0x8000 ; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], vcc -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[3:4] -; GCN-IR-NEXT: v_cndmask_b32_e64 v5, v5, 0, s[4:5] +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[2:3] +; GCN-IR-NEXT: v_cndmask_b32_e64 v4, v4, 0, s[4:5] ; GCN-IR-NEXT: s_xor_b64 s[4:5], s[4:5], -1 -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v5, 0 ; GCN-IR-NEXT: s_mov_b64 s[8:9], 0x8000 ; GCN-IR-NEXT: s_and_b64 s[4:5], s[4:5], vcc ; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5] ; GCN-IR-NEXT: s_cbranch_execz .LBB12_6 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v7, vcc, 1, v3 -; GCN-IR-NEXT: v_addc_u32_e32 v8, vcc, 0, v4, vcc -; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v3 -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[7:8] +; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2 +; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc +; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2 +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7] ; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[8:9], v2 ; GCN-IR-NEXT: v_mov_b32_e32 v4, 0 ; GCN-IR-NEXT: v_mov_b32_e32 v5, 0 @@ -1804,8 +1804,8 @@ ; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v0 ; GCN-IR-NEXT: s_mov_b64 s[4:5], 0x8000 ; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v1, vcc -; GCN-IR-NEXT: v_lshr_b64 v[8:9], s[4:5], v7 -; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 47, v6 +; GCN-IR-NEXT: v_lshr_b64 v[8:9], s[4:5], v6 +; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 47, v10 ; GCN-IR-NEXT: v_mov_b32_e32 v10, 0 ; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, vcc ; GCN-IR-NEXT: s_mov_b64 s[10:11], 0 @@ -1839,15 +1839,15 @@ ; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11] ; GCN-IR-NEXT: .LBB12_5: ; %Flow3 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9] -; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[2:3], 1 -; GCN-IR-NEXT: v_or_b32_e32 v2, v5, v7 -; GCN-IR-NEXT: v_or_b32_e32 v5, v4, v6 +; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1 +; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v3 +; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v2 ; GCN-IR-NEXT: .LBB12_6: ; %Flow4 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[6:7] -; GCN-IR-NEXT: v_mul_lo_u32 v2, v0, v2 -; GCN-IR-NEXT: v_mul_hi_u32 v3, v0, v5 -; GCN-IR-NEXT: v_mul_lo_u32 v1, v1, v5 -; GCN-IR-NEXT: v_mul_lo_u32 v0, v0, v5 +; GCN-IR-NEXT: v_mul_lo_u32 v2, v0, v5 +; GCN-IR-NEXT: v_mul_hi_u32 v3, v0, v4 +; GCN-IR-NEXT: v_mul_lo_u32 v1, v1, v4 +; GCN-IR-NEXT: v_mul_lo_u32 v0, v0, v4 ; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, v3, v2 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 0x8000, v0 @@ -1873,87 +1873,87 @@ ; GCN-IR-LABEL: v_test_srem_pow2_k_den_i64: ; GCN-IR: ; %bb.0: ; %_udiv-special-cases ; GCN-IR-NEXT: s_waitcnt vmcnt(0) expcnt(0) 
lgkmcnt(0) -; GCN-IR-NEXT: v_ashrrev_i32_e32 v2, 31, v1 -; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v2 -; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v2 -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v2 -; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc -; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v0 -; GCN-IR-NEXT: v_add_i32_e64 v3, s[4:5], 32, v3 -; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v1 -; GCN-IR-NEXT: v_min_u32_e32 v8, v3, v4 -; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 48, v8 -; GCN-IR-NEXT: v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5] +; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v1 +; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v12 +; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v12 +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12 +; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v12, vcc +; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0 +; GCN-IR-NEXT: v_add_i32_e64 v2, s[4:5], 32, v2 +; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1 +; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3 +; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 48, v10 +; GCN-IR-NEXT: v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5] ; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1] -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[4:5] -; GCN-IR-NEXT: v_mov_b32_e32 v3, v2 +; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[2:3] +; GCN-IR-NEXT: v_mov_b32_e32 v13, v12 ; GCN-IR-NEXT: s_or_b64 s[4:5], vcc, s[4:5] -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[4:5] +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[2:3] ; GCN-IR-NEXT: s_xor_b64 s[6:7], s[4:5], -1 -; GCN-IR-NEXT: v_cndmask_b32_e64 v7, v1, 0, s[4:5] -; GCN-IR-NEXT: v_cndmask_b32_e64 v6, v0, 0, s[4:5] +; GCN-IR-NEXT: v_cndmask_b32_e64 v5, v1, 0, s[4:5] +; GCN-IR-NEXT: v_cndmask_b32_e64 v4, v0, 0, s[4:5] ; GCN-IR-NEXT: s_and_b64 s[4:5], s[6:7], vcc ; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5] ; GCN-IR-NEXT: s_cbranch_execz .LBB13_6 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v9, vcc, 1, v4 -; GCN-IR-NEXT: v_addc_u32_e32 v10, vcc, 0, v5, vcc -; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 63, v4 -; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[9:10] -; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[0:1], v4 -; GCN-IR-NEXT: v_mov_b32_e32 v6, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v7, 0 +; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2 +; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc +; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2 +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7] +; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[0:1], v2 +; GCN-IR-NEXT: v_mov_b32_e32 v4, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v5, 0 ; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc ; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5] ; GCN-IR-NEXT: s_cbranch_execz .LBB13_5 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader -; GCN-IR-NEXT: v_lshr_b64 v[10:11], v[0:1], v9 -; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 0xffffffcf, v8 -; GCN-IR-NEXT: v_mov_b32_e32 v12, 0 -; GCN-IR-NEXT: v_addc_u32_e64 v9, s[4:5], 0, -1, vcc +; GCN-IR-NEXT: v_lshr_b64 v[8:9], v[0:1], v6 +; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 0xffffffcf, v10 +; GCN-IR-NEXT: v_mov_b32_e32 v10, 0 +; GCN-IR-NEXT: v_addc_u32_e64 v7, s[4:5], 0, -1, vcc ; GCN-IR-NEXT: s_mov_b64 s[10:11], 0 -; GCN-IR-NEXT: v_mov_b32_e32 v13, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v7, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v11, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v5, 0 ; GCN-IR-NEXT: s_movk_i32 s12, 0x7fff ; GCN-IR-NEXT: .LBB13_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-IR-NEXT: v_lshl_b64 v[10:11], v[10:11], 1 -; GCN-IR-NEXT: v_lshrrev_b32_e32 v6, 31, v5 -; GCN-IR-NEXT: v_or_b32_e32 v10, v10, v6 -; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, s12, v10 -; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[4:5], 1 -; GCN-IR-NEXT: 
v_subb_u32_e32 v6, vcc, 0, v11, vcc -; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v8 -; GCN-IR-NEXT: v_or_b32_e32 v4, v12, v4 -; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v6 -; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc -; GCN-IR-NEXT: v_and_b32_e32 v6, 1, v12 -; GCN-IR-NEXT: v_and_b32_e32 v12, 0x8000, v12 -; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9] -; GCN-IR-NEXT: v_or_b32_e32 v5, v13, v5 -; GCN-IR-NEXT: v_sub_i32_e64 v10, s[4:5], v10, v12 -; GCN-IR-NEXT: v_mov_b32_e32 v13, v7 -; GCN-IR-NEXT: v_subbrev_u32_e64 v11, s[4:5], 0, v11, s[4:5] +; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1 +; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3 +; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4 +; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, s12, v8 +; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1 +; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v9, vcc +; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6 +; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2 +; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4 +; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc +; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10 +; GCN-IR-NEXT: v_and_b32_e32 v10, 0x8000, v10 +; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7] +; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3 +; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10 +; GCN-IR-NEXT: v_mov_b32_e32 v11, v5 +; GCN-IR-NEXT: v_subbrev_u32_e64 v9, s[4:5], 0, v9, s[4:5] ; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11] -; GCN-IR-NEXT: v_mov_b32_e32 v12, v6 +; GCN-IR-NEXT: v_mov_b32_e32 v10, v4 ; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11] ; GCN-IR-NEXT: s_cbranch_execnz .LBB13_3 ; GCN-IR-NEXT: ; %bb.4: ; %Flow ; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11] ; GCN-IR-NEXT: .LBB13_5: ; %Flow3 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9] -; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[4:5], 1 -; GCN-IR-NEXT: v_or_b32_e32 v7, v7, v5 -; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4 +; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1 +; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v3 +; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v2 ; GCN-IR-NEXT: .LBB13_6: ; %Flow4 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[6:7] -; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[6:7], 15 -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v4 -; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v5, vcc -; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v2 -; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v3 +; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[4:5], 15 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v2 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc +; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v12 +; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v13 +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12 +; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v13, vcc ; GCN-IR-NEXT: s_setpc_b64 s[30:31] %result = srem i64 %x, 32768 ret i64 %result diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll --- a/llvm/test/CodeGen/AMDGPU/udiv64.ll +++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll @@ -136,34 +136,34 @@ ; GCN-IR-NEXT: s_add_i32 s6, s6, 32 ; GCN-IR-NEXT: s_flbit_i32_b32 s7, s3 ; GCN-IR-NEXT: s_min_u32 s10, s10, s11 -; GCN-IR-NEXT: s_min_u32 s14, s6, s7 -; GCN-IR-NEXT: s_sub_u32 s12, s10, s14 +; GCN-IR-NEXT: s_min_u32 s16, s6, s7 +; GCN-IR-NEXT: s_sub_u32 s12, s10, s16 ; GCN-IR-NEXT: s_subb_u32 s13, 0, 0 -; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[16:17], s[12:13], 63 +; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[14:15], s[12:13], 63 ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[12:13], 63 -; GCN-IR-NEXT: s_or_b64 s[16:17], s[8:9], s[16:17] -; GCN-IR-NEXT: s_and_b64 s[8:9], s[16:17], exec +; GCN-IR-NEXT: s_or_b64 s[14:15], s[8:9], s[14:15] +; GCN-IR-NEXT: s_and_b64 s[8:9], s[14:15], exec ; GCN-IR-NEXT: s_cselect_b32 s9, 0, 
s3
 ; GCN-IR-NEXT: s_cselect_b32 s8, 0, s2
-; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[18:19]
+; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[18:19]
 ; GCN-IR-NEXT: s_mov_b64 s[6:7], 0
-; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[16:17]
+; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[14:15]
 ; GCN-IR-NEXT: s_mov_b32 s11, 0
 ; GCN-IR-NEXT: s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: s_add_u32 s16, s12, 1
-; GCN-IR-NEXT: s_addc_u32 s17, s13, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[16:17], 0
+; GCN-IR-NEXT: s_add_u32 s14, s12, 1
+; GCN-IR-NEXT: s_addc_u32 s15, s13, 0
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
 ; GCN-IR-NEXT: s_sub_i32 s12, 63, s12
 ; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[8:9]
 ; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[2:3], s12
 ; GCN-IR-NEXT: s_cbranch_vccz .LBB0_4
 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: s_lshr_b64 s[12:13], s[2:3], s16
-; GCN-IR-NEXT: s_add_u32 s15, s4, -1
-; GCN-IR-NEXT: s_addc_u32 s16, s5, -1
+; GCN-IR-NEXT: s_lshr_b64 s[12:13], s[2:3], s14
+; GCN-IR-NEXT: s_add_u32 s14, s4, -1
+; GCN-IR-NEXT: s_addc_u32 s15, s5, -1
 ; GCN-IR-NEXT: s_not_b64 s[2:3], s[10:11]
-; GCN-IR-NEXT: s_add_u32 s2, s2, s14
+; GCN-IR-NEXT: s_add_u32 s2, s2, s16
 ; GCN-IR-NEXT: s_addc_u32 s3, s3, 0
 ; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT: s_mov_b32 s7, 0
@@ -174,8 +174,8 @@
 ; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[8:9], 1
 ; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[6:7]
 ; GCN-IR-NEXT: s_or_b64 s[8:9], s[10:11], s[8:9]
-; GCN-IR-NEXT: s_sub_u32 s6, s15, s12
-; GCN-IR-NEXT: s_subb_u32 s6, s16, s13
+; GCN-IR-NEXT: s_sub_u32 s6, s14, s12
+; GCN-IR-NEXT: s_subb_u32 s6, s15, s13
 ; GCN-IR-NEXT: s_ashr_i32 s10, s6, 31
 ; GCN-IR-NEXT: s_mov_b32 s11, s10
 ; GCN-IR-NEXT: s_and_b32 s6, s10, 1
@@ -184,9 +184,9 @@
 ; GCN-IR-NEXT: s_subb_u32 s13, s13, s11
 ; GCN-IR-NEXT: s_add_u32 s2, s2, 1
 ; GCN-IR-NEXT: s_addc_u32 s3, s3, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[2:3], 0
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[2:3], 0
 ; GCN-IR-NEXT: s_mov_b64 s[10:11], s[6:7]
-; GCN-IR-NEXT: s_and_b64 vcc, exec, s[18:19]
+; GCN-IR-NEXT: s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT: s_cbranch_vccz .LBB0_3
 ; GCN-IR-NEXT: .LBB0_4: ; %Flow6
 ; GCN-IR-NEXT: s_lshl_b64 s[2:3], s[8:9], 1
@@ -319,12 +319,12 @@
 ; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v2
 ; GCN-IR-NEXT: v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT: v_min_u32_e32 v8, v4, v5
+; GCN-IR-NEXT: v_min_u32_e32 v10, v4, v5
 ; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v0
 ; GCN-IR-NEXT: v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT: v_min_u32_e32 v9, v4, v5
-; GCN-IR-NEXT: v_sub_i32_e64 v6, s[6:7], v8, v9
+; GCN-IR-NEXT: v_min_u32_e32 v11, v4, v5
+; GCN-IR-NEXT: v_sub_i32_e64 v6, s[6:7], v10, v11
 ; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
 ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT: v_subb_u32_e64 v7, s[6:7], 0, 0, s[6:7]
@@ -339,10 +339,10 @@
 ; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT: s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, 1, v6
-; GCN-IR-NEXT: v_addc_u32_e32 v11, vcc, 0, v7, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v6
+; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v7, vcc
 ; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 63, v6
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
 ; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[0:1], v4
 ; GCN-IR-NEXT: v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
@@ -351,38 +351,38 @@
 ; GCN-IR-NEXT: s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v2
-; GCN-IR-NEXT: v_lshr_b64 v[10:11], v[0:1], v10
+; GCN-IR-NEXT: v_lshr_b64 v[8:9], v[0:1], v8
 ; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v3, vcc
-; GCN-IR-NEXT: v_not_b32_e32 v0, v8
+; GCN-IR-NEXT: v_not_b32_e32 v0, v10
 ; GCN-IR-NEXT: v_not_b32_e32 v1, 0
-; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, v0, v9
-; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, v0, v11
+; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT: .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[10:11], v[10:11], 1
+; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
 ; GCN-IR-NEXT: v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT: v_or_b32_e32 v10, v10, v6
+; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v6
 ; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v12, v10
-; GCN-IR-NEXT: v_subb_u32_e32 v6, vcc, v13, v11, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v4, v8, v4
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v6
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v12, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v6, vcc, v13, v9, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v4, v10, v4
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v6
 ; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 1, v0
-; GCN-IR-NEXT: v_or_b32_e32 v5, v9, v5
-; GCN-IR-NEXT: v_and_b32_e32 v6, 1, v8
-; GCN-IR-NEXT: v_and_b32_e32 v9, v8, v3
-; GCN-IR-NEXT: v_and_b32_e32 v8, v8, v2
+; GCN-IR-NEXT: v_or_b32_e32 v5, v11, v5
+; GCN-IR-NEXT: v_and_b32_e32 v6, 1, v10
+; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v3
+; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v2
 ; GCN-IR-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT: v_sub_i32_e64 v10, s[4:5], v10, v8
-; GCN-IR-NEXT: v_subb_u32_e64 v11, s[4:5], v11, v9, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v9, v7
+; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
+; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
+; GCN-IR-NEXT: v_mov_b32_e32 v11, v7
 ; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v8, v6
+; GCN-IR-NEXT: v_mov_b32_e32 v10, v6
 ; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT: s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT: ; %bb.4: ; %Flow
@@ -804,33 +804,33 @@
 ; GCN-IR-NEXT: s_flbit_i32_b32 s4, s8
 ; GCN-IR-NEXT: s_add_i32 s4, s4, 32
 ; GCN-IR-NEXT: s_flbit_i32_b32 s5, s9
-; GCN-IR-NEXT: s_min_u32 s14, s4, s5
-; GCN-IR-NEXT: s_sub_u32 s12, s10, s14
+; GCN-IR-NEXT: s_min_u32 s16, s4, s5
+; GCN-IR-NEXT: s_sub_u32 s12, s10, s16
 ; GCN-IR-NEXT: s_subb_u32 s13, 0, 0
-; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[16:17], s[12:13], 63
+; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
 ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[12:13], 63
-; GCN-IR-NEXT: s_or_b64 s[16:17], s[6:7], s[16:17]
-; GCN-IR-NEXT: s_and_b64 s[6:7], s[16:17], exec
+; GCN-IR-NEXT: s_or_b64 s[14:15], s[6:7], s[14:15]
+; GCN-IR-NEXT: s_and_b64 s[6:7], s[14:15], exec
 ; GCN-IR-NEXT: s_cselect_b32 s7, 0, s9
 ; GCN-IR-NEXT: s_cselect_b32 s6, 0, s8
-; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[18:19]
+; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[18:19]
 ; GCN-IR-NEXT: s_mov_b64 s[4:5], 0
-; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[16:17]
+; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[14:15]
 ; GCN-IR-NEXT: s_cbranch_vccz .LBB7_5
 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: s_add_u32 s16, s12, 1
-; GCN-IR-NEXT: s_addc_u32 s17, s13, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[16:17], 0
+; GCN-IR-NEXT: s_add_u32 s14, s12, 1
+; GCN-IR-NEXT: s_addc_u32 s15, s13, 0
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[14:15], 0
 ; GCN-IR-NEXT: s_sub_i32 s12, 63, s12
 ; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[6:7]
 ; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[8:9], s12
 ; GCN-IR-NEXT: s_cbranch_vccz .LBB7_4
 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: s_lshr_b64 s[12:13], s[8:9], s16
-; GCN-IR-NEXT: s_add_u32 s15, s2, -1
-; GCN-IR-NEXT: s_addc_u32 s16, s3, -1
+; GCN-IR-NEXT: s_lshr_b64 s[12:13], s[8:9], s14
+; GCN-IR-NEXT: s_add_u32 s14, s2, -1
+; GCN-IR-NEXT: s_addc_u32 s15, s3, -1
 ; GCN-IR-NEXT: s_not_b64 s[4:5], s[10:11]
-; GCN-IR-NEXT: s_add_u32 s8, s4, s14
+; GCN-IR-NEXT: s_add_u32 s8, s4, s16
 ; GCN-IR-NEXT: s_addc_u32 s9, s5, 0
 ; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT: s_mov_b32 s5, 0
@@ -841,8 +841,8 @@
 ; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
 ; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[4:5]
 ; GCN-IR-NEXT: s_or_b64 s[6:7], s[10:11], s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s4, s15, s12
-; GCN-IR-NEXT: s_subb_u32 s4, s16, s13
+; GCN-IR-NEXT: s_sub_u32 s4, s14, s12
+; GCN-IR-NEXT: s_subb_u32 s4, s15, s13
 ; GCN-IR-NEXT: s_ashr_i32 s10, s4, 31
 ; GCN-IR-NEXT: s_mov_b32 s11, s10
 ; GCN-IR-NEXT: s_and_b32 s4, s10, 1
@@ -851,9 +851,9 @@
 ; GCN-IR-NEXT: s_subb_u32 s13, s13, s11
 ; GCN-IR-NEXT: s_add_u32 s8, s8, 1
 ; GCN-IR-NEXT: s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[8:9], 0
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[8:9], 0
 ; GCN-IR-NEXT: s_mov_b64 s[10:11], s[4:5]
-; GCN-IR-NEXT: s_and_b64 vcc, exec, s[18:19]
+; GCN-IR-NEXT: s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT: s_cbranch_vccz .LBB7_3
 ; GCN-IR-NEXT: .LBB7_4: ; %Flow3
 ; GCN-IR-NEXT: s_lshl_b64 s[2:3], s[6:7], 1
@@ -989,32 +989,32 @@
 ; GCN-IR-NEXT: s_flbit_i32_b32 s8, s2
 ; GCN-IR-NEXT: s_flbit_i32_b32 s9, s3
 ; GCN-IR-NEXT: s_add_i32 s8, s8, 32
-; GCN-IR-NEXT: s_min_u32 s8, s8, s9
-; GCN-IR-NEXT: s_add_u32 s10, s8, 0xffffffc5
-; GCN-IR-NEXT: s_addc_u32 s11, 0, -1
+; GCN-IR-NEXT: s_min_u32 s12, s8, s9
+; GCN-IR-NEXT: s_add_u32 s8, s12, 0xffffffc5
+; GCN-IR-NEXT: s_addc_u32 s9, 0, -1
 ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[2:3], 0
-; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[12:13], s[10:11], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[10:11], 63
-; GCN-IR-NEXT: s_or_b64 s[12:13], s[6:7], s[12:13]
-; GCN-IR-NEXT: s_and_b64 s[6:7], s[12:13], exec
+; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[10:11], s[8:9], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[8:9], 63
+; GCN-IR-NEXT: s_or_b64 s[10:11], s[6:7], s[10:11]
+; GCN-IR-NEXT: s_and_b64 s[6:7], s[10:11], exec
 ; GCN-IR-NEXT: s_cselect_b32 s6, 0, 24
-; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[14:15]
-; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[14:15]
+; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT: s_mov_b32 s7, 0
 ; GCN-IR-NEXT: s_cbranch_vccz .LBB8_5
 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: s_add_u32 s12, s10, 1
-; GCN-IR-NEXT: s_addc_u32 s13, s11, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[12:13], 0
-; GCN-IR-NEXT: s_sub_i32 s9, 63, s10
+; GCN-IR-NEXT: s_add_u32 s10, s8, 1
+; GCN-IR-NEXT: s_addc_u32 s11, s9, 0
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT: s_sub_i32 s8, 63, s8
 ; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[6:7]
-; GCN-IR-NEXT: s_lshl_b64 s[6:7], 24, s9
+; GCN-IR-NEXT: s_lshl_b64 s[6:7], 24, s8
 ; GCN-IR-NEXT: s_cbranch_vccz .LBB8_4
 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: s_lshr_b64 s[10:11], 24, s12
+; GCN-IR-NEXT: s_lshr_b64 s[10:11], 24, s10
 ; GCN-IR-NEXT: s_add_u32 s14, s2, -1
 ; GCN-IR-NEXT: s_addc_u32 s15, s3, -1
-; GCN-IR-NEXT: s_sub_u32 s8, 58, s8
+; GCN-IR-NEXT: s_sub_u32 s8, 58, s12
 ; GCN-IR-NEXT: s_subb_u32 s9, 0, 0
 ; GCN-IR-NEXT: s_mov_b64 s[12:13], 0
 ; GCN-IR-NEXT: s_mov_b32 s5, 0
@@ -1160,8 +1160,8 @@
 ; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v6, v2, v3
-; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 0xffffffd0, v6
+; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 0xffffffd0, v10
 ; GCN-IR-NEXT: v_addc_u32_e64 v5, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[4:5]
@@ -1176,10 +1176,10 @@
 ; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT: s_cbranch_execz .LBB9_6
 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v7, vcc, 1, v4
-; GCN-IR-NEXT: v_addc_u32_e32 v8, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v4
+; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v5, vcc
 ; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v4
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[7:8]
+; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[8:9], v2
 ; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
@@ -1190,8 +1190,8 @@
 ; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v0
 ; GCN-IR-NEXT: s_mov_b64 s[4:5], 0x8000
 ; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], s[4:5], v7
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 47, v6
+; GCN-IR-NEXT: v_lshr_b64 v[8:9], s[4:5], v6
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 47, v10
 ; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
 ; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
@@ -1251,8 +1251,8 @@
 ; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT: v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v6, v2, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 48, v6
+; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 48, v8
 ; GCN-IR-NEXT: v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
@@ -1265,10 +1265,10 @@
 ; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT: s_cbranch_execz .LBB10_6
 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v7, vcc, 1, v4
-; GCN-IR-NEXT: v_addc_u32_e32 v8, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v4
+; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v5, vcc
 ; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v4
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[7:8]
+; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[0:1], v2
 ; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
@@ -1276,35 +1276,35 @@
 ; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT: s_cbranch_execz .LBB10_5
 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_lshr_b64 v[7:8], v[0:1], v7
-; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 0xffffffcf, v6
-; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], v[0:1], v6
+; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 0xffffffcf, v8
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
 ; GCN-IR-NEXT: v_addc_u32_e64 v1, s[4:5], 0, -1, vcc
 ; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT: s_movk_i32 s12, 0x7fff
 ; GCN-IR-NEXT: .LBB10_3: ; %udiv-do-while
 ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[7:8], v[7:8], 1
+; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v6, v7, v4
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, s12, v6
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v8, vcc
-; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 1, v0
 ; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v7, 31, v4
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 1, v0
+; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
 ; GCN-IR-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v7
-; GCN-IR-NEXT: v_and_b32_e32 v7, 0x8000, v7
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v8, 0x8000, v8
 ; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT: v_or_b32_e32 v3, v10, v3
-; GCN-IR-NEXT: v_or_b32_e32 v2, v9, v2
-; GCN-IR-NEXT: v_sub_i32_e64 v7, s[4:5], v6, v7
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v5
-; GCN-IR-NEXT: v_subbrev_u32_e64 v8, s[4:5], 0, v8, s[4:5]
+; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT: v_sub_i32_e64 v6, s[4:5], v6, v8
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT: v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
 ; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v9, v4
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
 ; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT: s_cbranch_execnz .LBB10_3
 ; GCN-IR-NEXT: ; %bb.4: ; %Flow
@@ -1435,31 +1435,31 @@
 ; GCN-IR-NEXT: s_flbit_i32_b32 s6, s2
 ; GCN-IR-NEXT: s_flbit_i32_b32 s7, s3
 ; GCN-IR-NEXT: s_add_i32 s6, s6, 32
-; GCN-IR-NEXT: s_min_u32 s10, s6, s7
-; GCN-IR-NEXT: s_sub_u32 s8, 59, s10
+; GCN-IR-NEXT: s_min_u32 s12, s6, s7
+; GCN-IR-NEXT: s_sub_u32 s8, 59, s12
 ; GCN-IR-NEXT: s_subb_u32 s9, 0, 0
 ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], s[2:3], 0
 ; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[6:7], s[8:9], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[8:9], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[8:9], 63
 ; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT: s_and_b64 s[6:7], s[4:5], exec
 ; GCN-IR-NEXT: s_cselect_b32 s7, 0, s3
 ; GCN-IR-NEXT: s_cselect_b32 s6, 0, s2
-; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[12:13]
+; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[10:11]
 ; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[4:5]
 ; GCN-IR-NEXT: s_mov_b64 s[4:5], 0
 ; GCN-IR-NEXT: s_cbranch_vccz .LBB11_5
 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: s_add_u32 s12, s8, 1
-; GCN-IR-NEXT: s_addc_u32 s13, s9, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[12:13], 0
+; GCN-IR-NEXT: s_add_u32 s10, s8, 1
+; GCN-IR-NEXT: s_addc_u32 s11, s9, 0
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
 ; GCN-IR-NEXT: s_sub_i32 s8, 63, s8
 ; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[6:7]
 ; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[2:3], s8
 ; GCN-IR-NEXT: s_cbranch_vccz .LBB11_4
 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: s_lshr_b64 s[8:9], s[2:3], s12
-; GCN-IR-NEXT: s_add_u32 s2, s10, 0xffffffc4
+; GCN-IR-NEXT: s_lshr_b64 s[8:9], s[2:3], s10
+; GCN-IR-NEXT: s_add_u32 s2, s12, 0xffffffc4
 ; GCN-IR-NEXT: s_addc_u32 s3, 0, -1
 ; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT: s_mov_b32 s5, 0
@@ -1602,8 +1602,8 @@
 ; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT: v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v6, v2, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 59, v6
+; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 59, v8
 ; GCN-IR-NEXT: v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
@@ -1616,10 +1616,10 @@
 ; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT: s_cbranch_execz .LBB12_6
 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v7, vcc, 1, v4
-; GCN-IR-NEXT: v_addc_u32_e32 v8, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v4
+; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v5, vcc
 ; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v4
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[7:8]
+; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[0:1], v2
 ; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
@@ -1627,34 +1627,34 @@
 ; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT: s_cbranch_execz .LBB12_5
 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_lshr_b64 v[7:8], v[0:1], v7
-; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 0xffffffc4, v6
-; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], v[0:1], v6
+; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 0xffffffc4, v8
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
 ; GCN-IR-NEXT: v_addc_u32_e64 v1, s[4:5], 0, -1, vcc
 ; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT: .LBB12_3: ; %udiv-do-while
 ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[7:8], v[7:8], 1
+; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v6, v7, v4
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, 23, v6
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v8, vcc
-; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 1, v0
 ; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v7, 31, v4
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 1, v0
+; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
 ; GCN-IR-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v7
-; GCN-IR-NEXT: v_and_b32_e32 v7, 24, v7
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v8, 24, v8
 ; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT: v_or_b32_e32 v3, v10, v3
-; GCN-IR-NEXT: v_or_b32_e32 v2, v9, v2
-; GCN-IR-NEXT: v_sub_i32_e64 v7, s[4:5], v6, v7
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v5
-; GCN-IR-NEXT: v_subbrev_u32_e64 v8, s[4:5], 0, v8, s[4:5]
+; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT: v_sub_i32_e64 v6, s[4:5], v6, v8
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT: v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
 ; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v9, v4
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
 ; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT: s_cbranch_execnz .LBB12_3
 ; GCN-IR-NEXT: ; %bb.4: ; %Flow
diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -135,34 +135,34 @@
 ; GCN-IR-NEXT: s_add_i32 s6, s6, 32
 ; GCN-IR-NEXT: s_flbit_i32_b32 s7, s3
 ; GCN-IR-NEXT: s_min_u32 s10, s10, s11
-; GCN-IR-NEXT: s_min_u32 s14, s6, s7
-; GCN-IR-NEXT: s_sub_u32 s12, s10, s14
+; GCN-IR-NEXT: s_min_u32 s18, s6, s7
+; GCN-IR-NEXT: s_sub_u32 s12, s10, s18
 ; GCN-IR-NEXT: s_subb_u32 s13, 0, 0
-; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[16:17], s[12:13], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[12:13], 63
-; GCN-IR-NEXT: s_or_b64 s[16:17], s[8:9], s[16:17]
-; GCN-IR-NEXT: s_and_b64 s[8:9], s[16:17], exec
+; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[12:13], 63
+; GCN-IR-NEXT: s_or_b64 s[14:15], s[8:9], s[14:15]
+; GCN-IR-NEXT: s_and_b64 s[8:9], s[14:15], exec
 ; GCN-IR-NEXT: s_cselect_b32 s9, 0, s3
 ; GCN-IR-NEXT: s_cselect_b32 s8, 0, s2
-; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[18:19]
+; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[16:17]
 ; GCN-IR-NEXT: s_mov_b64 s[6:7], 0
-; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[16:17]
+; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[14:15]
 ; GCN-IR-NEXT: s_mov_b32 s11, 0
 ; GCN-IR-NEXT: s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: s_add_u32 s16, s12, 1
-; GCN-IR-NEXT: s_addc_u32 s17, s13, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[16:17], 0
+; GCN-IR-NEXT: s_add_u32 s14, s12, 1
+; GCN-IR-NEXT: s_addc_u32 s15, s13, 0
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
 ; GCN-IR-NEXT: s_sub_i32 s12, 63, s12
 ; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[8:9]
 ; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[2:3], s12
 ; GCN-IR-NEXT: s_cbranch_vccz .LBB0_4
 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: s_lshr_b64 s[12:13], s[2:3], s16
+; GCN-IR-NEXT: s_lshr_b64 s[12:13], s[2:3], s14
 ; GCN-IR-NEXT: s_add_u32 s16, s4, -1
 ; GCN-IR-NEXT: s_addc_u32 s17, s5, -1
 ; GCN-IR-NEXT: s_not_b64 s[6:7], s[10:11]
-; GCN-IR-NEXT: s_add_u32 s10, s6, s14
+; GCN-IR-NEXT: s_add_u32 s10, s6, s18
 ; GCN-IR-NEXT: s_addc_u32 s11, s7, 0
 ; GCN-IR-NEXT: s_mov_b64 s[14:15], 0
 ; GCN-IR-NEXT: s_mov_b32 s7, 0
@@ -328,30 +328,30 @@
 ; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v2
 ; GCN-IR-NEXT: v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT: v_min_u32_e32 v8, v4, v5
+; GCN-IR-NEXT: v_min_u32_e32 v12, v4, v5
 ; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v0
 ; GCN-IR-NEXT: v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT: v_min_u32_e32 v9, v4, v5
-; GCN-IR-NEXT: v_sub_i32_e64 v5, s[6:7], v8, v9
+; GCN-IR-NEXT: v_min_u32_e32 v13, v4, v5
+; GCN-IR-NEXT: v_sub_i32_e64 v4, s[6:7], v12, v13
 ; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
 ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT: v_subb_u32_e64 v6, s[6:7], 0, 0, s[6:7]
-; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[6:7], 63, v[5:6]
+; GCN-IR-NEXT: v_subb_u32_e64 v5, s[6:7], 0, 0, s[6:7]
+; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[6:7], 63, v[4:5]
 ; GCN-IR-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
 ; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[5:6]
+; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[4:5]
 ; GCN-IR-NEXT: s_xor_b64 s[6:7], s[4:5], -1
 ; GCN-IR-NEXT: v_cndmask_b32_e64 v7, v1, 0, s[4:5]
-; GCN-IR-NEXT: v_cndmask_b32_e64 v4, v0, 0, s[4:5]
+; GCN-IR-NEXT: v_cndmask_b32_e64 v6, v0, 0, s[4:5]
 ; GCN-IR-NEXT: s_and_b64 s[4:5], s[6:7], vcc
 ; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT: s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, 1, v5
-; GCN-IR-NEXT: v_addc_u32_e32 v11, vcc, 0, v6, vcc
-; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 63, v5
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4
+; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 63, v4
+; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
 ; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[0:1], v4
 ; GCN-IR-NEXT: v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
@@ -361,10 +361,10 @@
 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, -1, v2
 ; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, -1, v3, vcc
-; GCN-IR-NEXT: v_not_b32_e32 v7, v8
-; GCN-IR-NEXT: v_lshr_b64 v[10:11], v[0:1], v10
+; GCN-IR-NEXT: v_not_b32_e32 v7, v12
+; GCN-IR-NEXT: v_lshr_b64 v[10:11], v[0:1], v8
 ; GCN-IR-NEXT: v_not_b32_e32 v6, 0
-; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, v7, v9
+; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, v7, v13
 ; GCN-IR-NEXT: v_mov_b32_e32 v12, 0
 ; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v6, vcc
 ; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
@@ -400,15 +400,15 @@
 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[4:5], 1
 ; GCN-IR-NEXT: v_or_b32_e32 v7, v7, v5
-; GCN-IR-NEXT: v_or_b32_e32 v4, v6, v4
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT: .LBB1_6: ; %Flow4
 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT: v_mul_lo_u32 v5, v2, v7
-; GCN-IR-NEXT: v_mul_hi_u32 v6, v2, v4
-; GCN-IR-NEXT: v_mul_lo_u32 v3, v3, v4
-; GCN-IR-NEXT: v_mul_lo_u32 v2, v2, v4
-; GCN-IR-NEXT: v_add_i32_e32 v5, vcc, v6, v5
-; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, v5, v3
+; GCN-IR-NEXT: v_mul_lo_u32 v4, v2, v7
+; GCN-IR-NEXT: v_mul_hi_u32 v5, v2, v6
+; GCN-IR-NEXT: v_mul_lo_u32 v3, v3, v6
+; GCN-IR-NEXT: v_mul_lo_u32 v2, v2, v6
+; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, v5, v4
+; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, v4, v3
 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
 ; GCN-IR-NEXT: s_setpc_b64 s[30:31]
@@ -817,32 +817,32 @@
 ; GCN-IR-NEXT: s_flbit_i32_b32 s8, s2
 ; GCN-IR-NEXT: s_flbit_i32_b32 s9, s3
 ; GCN-IR-NEXT: s_add_i32 s8, s8, 32
-; GCN-IR-NEXT: s_min_u32 s8, s8, s9
-; GCN-IR-NEXT: s_add_u32 s10, s8, 0xffffffc5
-; GCN-IR-NEXT: s_addc_u32 s11, 0, -1
+; GCN-IR-NEXT: s_min_u32 s12, s8, s9
+; GCN-IR-NEXT: s_add_u32 s8, s12, 0xffffffc5
+; GCN-IR-NEXT: s_addc_u32 s9, 0, -1
 ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[2:3], 0
-; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[12:13], s[10:11], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[10:11], 63
-; GCN-IR-NEXT: s_or_b64 s[12:13], s[6:7], s[12:13]
-; GCN-IR-NEXT: s_and_b64 s[6:7], s[12:13], exec
+; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[10:11], s[8:9], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[8:9], 63
+; GCN-IR-NEXT: s_or_b64 s[10:11], s[6:7], s[10:11]
+; GCN-IR-NEXT: s_and_b64 s[6:7], s[10:11], exec
 ; GCN-IR-NEXT: s_cselect_b32 s6, 0, 24
-; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[14:15]
-; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[14:15]
+; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT: s_mov_b32 s7, 0
 ; GCN-IR-NEXT: s_cbranch_vccz .LBB6_5
 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: s_add_u32 s12, s10, 1
-; GCN-IR-NEXT: s_addc_u32 s13, s11, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[12:13], 0
-; GCN-IR-NEXT: s_sub_i32 s9, 63, s10
+; GCN-IR-NEXT: s_add_u32 s10, s8, 1
+; GCN-IR-NEXT: s_addc_u32 s11, s9, 0
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT: s_sub_i32 s8, 63, s8
 ; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[6:7]
-; GCN-IR-NEXT: s_lshl_b64 s[6:7], 24, s9
+; GCN-IR-NEXT: s_lshl_b64 s[6:7], 24, s8
 ; GCN-IR-NEXT: s_cbranch_vccz .LBB6_4
 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: s_lshr_b64 s[10:11], 24, s12
+; GCN-IR-NEXT: s_lshr_b64 s[10:11], 24, s10
 ; GCN-IR-NEXT: s_add_u32 s14, s2, -1
 ; GCN-IR-NEXT: s_addc_u32 s15, s3, -1
-; GCN-IR-NEXT: s_sub_u32 s8, 58, s8
+; GCN-IR-NEXT: s_sub_u32 s8, 58, s12
 ; GCN-IR-NEXT: s_subb_u32 s9, 0, 0
 ; GCN-IR-NEXT: s_mov_b64 s[12:13], 0
 ; GCN-IR-NEXT: s_mov_b32 s5, 0
@@ -1001,31 +1001,31 @@
 ; GCN-IR-NEXT: s_flbit_i32_b32 s6, s2
 ; GCN-IR-NEXT: s_flbit_i32_b32 s7, s3
 ; GCN-IR-NEXT: s_add_i32 s6, s6, 32
-; GCN-IR-NEXT: s_min_u32 s8, s6, s7
-; GCN-IR-NEXT: s_sub_u32 s10, 59, s8
-; GCN-IR-NEXT: s_subb_u32 s11, 0, 0
+; GCN-IR-NEXT: s_min_u32 s12, s6, s7
+; GCN-IR-NEXT: s_sub_u32 s8, 59, s12
+; GCN-IR-NEXT: s_subb_u32 s9, 0, 0
 ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], s[2:3], 0
-; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[6:7], s[10:11], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[10:11], 63
+; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[6:7], s[8:9], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[8:9], 63
 ; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT: s_and_b64 s[6:7], s[4:5], exec
 ; GCN-IR-NEXT: s_cselect_b32 s7, 0, s3
 ; GCN-IR-NEXT: s_cselect_b32 s6, 0, s2
-; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[12:13]
+; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[10:11]
 ; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[4:5]
 ; GCN-IR-NEXT: s_mov_b64 s[4:5], 0
 ; GCN-IR-NEXT: s_cbranch_vccz .LBB7_5
 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: s_add_u32 s12, s10, 1
-; GCN-IR-NEXT: s_addc_u32 s13, s11, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[12:13], 0
-; GCN-IR-NEXT: s_sub_i32 s9, 63, s10
+; GCN-IR-NEXT: s_add_u32 s10, s8, 1
+; GCN-IR-NEXT: s_addc_u32 s11, s9, 0
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT: s_sub_i32 s8, 63, s8
 ; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[6:7]
-; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[2:3], s9
+; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[2:3], s8
 ; GCN-IR-NEXT: s_cbranch_vccz .LBB7_4
 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: s_lshr_b64 s[10:11], s[2:3], s12
-; GCN-IR-NEXT: s_add_u32 s8, s8, 0xffffffc4
+; GCN-IR-NEXT: s_lshr_b64 s[10:11], s[2:3], s10
+; GCN-IR-NEXT: s_add_u32 s8, s12, 0xffffffc4
 ; GCN-IR-NEXT: s_addc_u32 s9, 0, -1
 ; GCN-IR-NEXT: s_mov_b64 s[12:13], 0
 ; GCN-IR-NEXT: s_mov_b32 s5, 0
@@ -1178,26 +1178,26 @@
 ; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v6, v2, v3
-; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, 0xffffffd0, v6
-; GCN-IR-NEXT: v_addc_u32_e64 v4, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 0xffffffd0, v10
+; GCN-IR-NEXT: v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[3:4]
-; GCN-IR-NEXT: v_mov_b32_e32 v5, 0x8000
+; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT: v_mov_b32_e32 v4, 0x8000
 ; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], vcc
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[3:4]
-; GCN-IR-NEXT: v_cndmask_b32_e64 v5, v5, 0, s[4:5]
+; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT: v_cndmask_b32_e64 v4, v4, 0, s[4:5]
 ; GCN-IR-NEXT: s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT: v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT: s_mov_b64 s[8:9], 0x8000
 ; GCN-IR-NEXT: s_and_b64 s[4:5], s[4:5], vcc
 ; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT: s_cbranch_execz .LBB8_6
 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v7, vcc, 1, v3
-; GCN-IR-NEXT: v_addc_u32_e32 v8, vcc, 0, v4, vcc
-; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v3
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[7:8]
+; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2
+; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[8:9], v2
 ; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
@@ -1208,8 +1208,8 @@
 ; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v0
 ; GCN-IR-NEXT: s_mov_b64 s[4:5], 0x8000
 ; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], s[4:5], v7
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 47, v6
+; GCN-IR-NEXT: v_lshr_b64 v[8:9], s[4:5], v6
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 47, v10
 ; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
 ; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
@@ -1243,15 +1243,15 @@
 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT: .LBB8_5: ; %Flow3
 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
-; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[2:3], 1
-; GCN-IR-NEXT: v_or_b32_e32 v2, v5, v7
-; GCN-IR-NEXT: v_or_b32_e32 v5, v4, v6
+; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
+; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v3
+; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v2
 ; GCN-IR-NEXT: .LBB8_6: ; %Flow4
 ; GCN-IR-NEXT: s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT: v_mul_lo_u32 v2, v0, v2
-; GCN-IR-NEXT: v_mul_hi_u32 v3, v0, v5
-; GCN-IR-NEXT: v_mul_lo_u32 v1, v1, v5
-; GCN-IR-NEXT: v_mul_lo_u32 v0, v0, v5
+; GCN-IR-NEXT: v_mul_lo_u32 v2, v0, v5
+; GCN-IR-NEXT: v_mul_hi_u32 v3, v0, v4
+; GCN-IR-NEXT: v_mul_lo_u32 v1, v1, v4
+; GCN-IR-NEXT: v_mul_lo_u32 v0, v0, v4
 ; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, v3, v2
 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 0x8000, v0
@@ -1275,8 +1275,8 @@
 ; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT: v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v6, v2, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 48, v6
+; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 48, v10
 ; GCN-IR-NEXT: v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[2:3]
@@ -1289,10 +1289,10 @@
 ; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT: s_cbranch_execz .LBB9_6
 ; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v7, vcc, 1, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v8, vcc, 0, v3, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
 ; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[7:8]
+; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[0:1], v2
 ; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
@@ -1300,8 +1300,8 @@
 ; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT: s_cbranch_execz .LBB9_5
 ; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], v[0:1], v7
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 0xffffffcf, v6
+; GCN-IR-NEXT: v_lshr_b64 v[8:9], v[0:1], v6
+; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 0xffffffcf, v10
 ; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT: v_addc_u32_e64 v7, s[4:5], 0, -1, vcc
 ; GCN-IR-NEXT: s_mov_b64 s[10:11], 0