diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -158,8 +158,7 @@
       return true;
     }
 
-    void setMI(MachineBasicBlock::iterator MI, const SIInstrInfo &TII,
-               const GCNSubtarget &STM);
+    void setMI(MachineBasicBlock::iterator MI, const SILoadStoreOptimizer &LSO);
   };
 
   struct BaseRegisters {
@@ -484,11 +483,10 @@
 }
 
 void SILoadStoreOptimizer::CombineInfo::setMI(MachineBasicBlock::iterator MI,
-                                              const SIInstrInfo &TII,
-                                              const GCNSubtarget &STM) {
+                                              const SILoadStoreOptimizer &LSO) {
   I = MI;
   unsigned Opc = MI->getOpcode();
-  InstClass = getInstClass(Opc, TII);
+  InstClass = getInstClass(Opc, *LSO.TII);
 
   if (InstClass == UNKNOWN)
     return;
@@ -505,7 +503,7 @@
               : 4;
     break;
   case S_BUFFER_LOAD_IMM:
-    EltSize = AMDGPU::convertSMRDOffsetUnits(STM, 4);
+    EltSize = AMDGPU::convertSMRDOffsetUnits(*LSO.STM, 4);
     break;
   default:
     EltSize = 4;
@@ -513,7 +511,7 @@
   }
 
   if (InstClass == MIMG) {
-    DMask = TII.getNamedOperand(*I, AMDGPU::OpName::dmask)->getImm();
+    DMask = LSO.TII->getNamedOperand(*I, AMDGPU::OpName::dmask)->getImm();
     // Offset is not considered for MIMG instructions.
     Offset = 0;
   } else {
@@ -522,17 +520,17 @@
   }
 
   if (InstClass == TBUFFER_LOAD || InstClass == TBUFFER_STORE)
-    Format = TII.getNamedOperand(*I, AMDGPU::OpName::format)->getImm();
+    Format = LSO.TII->getNamedOperand(*I, AMDGPU::OpName::format)->getImm();
 
-  Width = getOpcodeWidth(*I, TII);
+  Width = getOpcodeWidth(*I, *LSO.TII);
 
   if ((InstClass == DS_READ) || (InstClass == DS_WRITE)) {
     Offset &= 0xffff;
   } else if (InstClass != MIMG) {
-    CPol = TII.getNamedOperand(*I, AMDGPU::OpName::cpol)->getImm();
+    CPol = LSO.TII->getNamedOperand(*I, AMDGPU::OpName::cpol)->getImm();
   }
 
-  AddressRegs Regs = getRegs(Opc, TII);
+  AddressRegs Regs = getRegs(Opc, *LSO.TII);
 
   NumAddresses = 0;
   for (unsigned J = 0; J < Regs.NumVAddrs; J++)
@@ -2010,7 +2008,7 @@
       continue;
 
     CombineInfo CI;
-    CI.setMI(MI, *TII, *STM);
+    CI.setMI(MI, *this);
     CI.Order = Order++;
 
     if (!CI.hasMergeableAddress(*MRI))
@@ -2123,54 +2121,54 @@
     case DS_READ: {
       MachineBasicBlock::iterator NewMI =
           mergeRead2Pair(CI, Paired, InstsToMove);
-      CI.setMI(NewMI, *TII, *STM);
+      CI.setMI(NewMI, *this);
       break;
     }
     case DS_WRITE: {
       MachineBasicBlock::iterator NewMI =
           mergeWrite2Pair(CI, Paired, InstsToMove);
-      CI.setMI(NewMI, *TII, *STM);
+      CI.setMI(NewMI, *this);
      break;
     }
    case S_BUFFER_LOAD_IMM: {
       MachineBasicBlock::iterator NewMI =
           mergeSBufferLoadImmPair(CI, Paired, InstsToMove);
-      CI.setMI(NewMI, *TII, *STM);
+      CI.setMI(NewMI, *this);
       OptimizeListAgain |= (CI.Width + Paired.Width) < 8;
       break;
     }
     case BUFFER_LOAD: {
       MachineBasicBlock::iterator NewMI =
           mergeBufferLoadPair(CI, Paired, InstsToMove);
-      CI.setMI(NewMI, *TII, *STM);
+      CI.setMI(NewMI, *this);
       OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
       break;
     }
     case BUFFER_STORE: {
       MachineBasicBlock::iterator NewMI =
           mergeBufferStorePair(CI, Paired, InstsToMove);
-      CI.setMI(NewMI, *TII, *STM);
+      CI.setMI(NewMI, *this);
       OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
       break;
     }
     case MIMG: {
       MachineBasicBlock::iterator NewMI =
           mergeImagePair(CI, Paired, InstsToMove);
-      CI.setMI(NewMI, *TII, *STM);
+      CI.setMI(NewMI, *this);
       OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
       break;
     }
     case TBUFFER_LOAD: {
       MachineBasicBlock::iterator NewMI =
           mergeTBufferLoadPair(CI, Paired, InstsToMove);
-      CI.setMI(NewMI, *TII, *STM);
+      CI.setMI(NewMI, *this);
       OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
       break;
     }
     case TBUFFER_STORE: {
       MachineBasicBlock::iterator NewMI =
           mergeTBufferStorePair(CI, Paired, InstsToMove);
-      CI.setMI(NewMI, *TII, *STM);
+      CI.setMI(NewMI, *this);
       OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
       break;
     }
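
For reference, a minimal standalone sketch of the call shape this patch moves to. The types below are simplified stand-ins, not the real LLVM classes: CombineInfo::setMI() now takes the pass by const reference and reads the cached TII and STM pointers through it, so later additions to the pass state do not require widening the signature again.

// Standalone sketch (assumed, simplified stand-ins for the LLVM types).
#include <cstdio>

struct SIInstrInfo {};   // stand-in for the instruction info the pass caches
struct GCNSubtarget {};  // stand-in for the subtarget the pass caches

class SILoadStoreOptimizer {
public:
  const SIInstrInfo *TII = nullptr;
  const GCNSubtarget *STM = nullptr;

  struct CombineInfo {
    // Was: setMI(MI, const SIInstrInfo &TII, const GCNSubtarget &STM).
    void setMI(unsigned MI, const SILoadStoreOptimizer &LSO);
  };

  void run();
};

void SILoadStoreOptimizer::CombineInfo::setMI(unsigned MI,
                                              const SILoadStoreOptimizer &LSO) {
  // Everything the pass caches is reachable through LSO, so a new cached
  // pointer added later does not change this signature.
  std::printf("MI %u: TII=%p STM=%p\n", MI,
              static_cast<const void *>(LSO.TII),
              static_cast<const void *>(LSO.STM));
}

void SILoadStoreOptimizer::run() {
  SIInstrInfo InstrInfo;   // the real pass sets these up in runOnMachineFunction()
  GCNSubtarget Subtarget;
  TII = &InstrInfo;
  STM = &Subtarget;

  CombineInfo CI;
  CI.setMI(/*MI=*/0, *this); // was: CI.setMI(MI, *TII, *STM)
}

int main() { SILoadStoreOptimizer().run(); }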