diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.h b/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.h --- a/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.h @@ -19,8 +19,8 @@ namespace AMDGPU { -/// Returns Base register, constant offset, and offset def point. -std::tuple<Register, unsigned, MachineInstr *> +/// Returns base register and constant offset. +std::pair<Register, unsigned> getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg); bool isLegalVOP3PShuffleMask(ArrayRef<int> Mask); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp b/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp @@ -13,11 +13,11 @@ using namespace llvm; using namespace MIPatternMatch; -std::tuple<Register, unsigned, MachineInstr *> +std::pair<Register, unsigned> AMDGPU::getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg) { MachineInstr *Def = getDefIgnoringCopies(Reg, MRI); if (!Def) - return std::make_tuple(Reg, 0, nullptr); + return std::make_pair(Reg, 0); if (Def->getOpcode() == TargetOpcode::G_CONSTANT) { unsigned Offset; @@ -27,21 +27,21 @@ else Offset = Op.getCImm()->getZExtValue(); - return std::make_tuple(Register(), Offset, Def); + return std::make_pair(Register(), Offset); } int64_t Offset; if (Def->getOpcode() == TargetOpcode::G_ADD) { // TODO: Handle G_OR used for add case if (mi_match(Def->getOperand(2).getReg(), MRI, m_ICst(Offset))) - return std::make_tuple(Def->getOperand(1).getReg(), Offset, Def); + return std::make_pair(Def->getOperand(1).getReg(), Offset); // FIXME: matcher should ignore copies if (mi_match(Def->getOperand(2).getReg(), MRI, m_Copy(m_ICst(Offset)))) - return std::make_tuple(Def->getOperand(1).getReg(), Offset, Def); + return std::make_pair(Def->getOperand(1).getReg(), Offset); } - return std::make_tuple(Reg, 0, Def); + return std::make_pair(Reg, 0); } bool AMDGPU::isLegalVOP3PShuffleMask(ArrayRef<int> Mask) { diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp @@ -1353,8 +1353,8 @@ BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0) .addImm(0); } else { - std::tie(BaseOffset, ImmOffset, OffsetDef) - = AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset); + std::tie(BaseOffset, ImmOffset) = + AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset); if (Readfirstlane) { // We have the constant offset now, so put the readfirstlane back on the @@ -2573,10 +2573,8 @@ unsigned EltSize) { Register IdxBaseReg; int Offset; - MachineInstr *Unused; - std::tie(IdxBaseReg, Offset, Unused) - = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg); + std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg); if (IdxBaseReg == AMDGPU::NoRegister) { // This will happen if the index is a known constant. This should ordinarily // be legalized out, but handle it as a register just in case. 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp @@ -3488,11 +3488,10 @@ const unsigned MaxImm = 4095; Register BaseReg; unsigned TotalConstOffset; - MachineInstr *OffsetDef; const LLT S32 = LLT::scalar(32); - std::tie(BaseReg, TotalConstOffset, OffsetDef) - = AMDGPU::getBaseWithConstantOffset(*B.getMRI(), OrigOffset); + std::tie(BaseReg, TotalConstOffset) = + AMDGPU::getBaseWithConstantOffset(*B.getMRI(), OrigOffset); unsigned ImmOffset = TotalConstOffset; diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp @@ -1347,10 +1347,9 @@ Register Base; unsigned Offset; - MachineInstr *Unused; - std::tie(Base, Offset, Unused) - = AMDGPU::getBaseWithConstantOffset(*MRI, CombinedOffset); + std::tie(Base, Offset) = + AMDGPU::getBaseWithConstantOffset(*MRI, CombinedOffset); uint32_t SOffset, ImmOffset; if (Offset > 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset, @@ -2708,8 +2707,7 @@ Register BaseIdxReg; unsigned ConstOffset; - MachineInstr *OffsetDef; - std::tie(BaseIdxReg, ConstOffset, OffsetDef) = + std::tie(BaseIdxReg, ConstOffset) = AMDGPU::getBaseWithConstantOffset(MRI, MI.getOperand(2).getReg()); // See if the index is an add of a constant which will be foldable by moving @@ -2840,9 +2838,8 @@ Register BaseIdxReg; unsigned ConstOffset; - MachineInstr *OffsetDef; - std::tie(BaseIdxReg, ConstOffset, OffsetDef) = - AMDGPU::getBaseWithConstantOffset(MRI, MI.getOperand(3).getReg()); + std::tie(BaseIdxReg, ConstOffset) = + AMDGPU::getBaseWithConstantOffset(MRI, MI.getOperand(3).getReg()); // See if the index is an add of a constant which will be foldable by moving // the base register of the index later if this is going to be 
executed in a