diff --git a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
--- a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -26,15 +26,38 @@
 namespace {
 
 class SIShrinkInstructions : public MachineFunctionPass {
+  MachineRegisterInfo *MRI;
+  const GCNSubtarget *ST;
+  const SIInstrInfo *TII;
+  const SIRegisterInfo *TRI;
+
 public:
   static char ID;
 
-  void shrinkMIMG(MachineInstr &MI);
-
 public:
   SIShrinkInstructions() : MachineFunctionPass(ID) {
   }
 
+  bool foldImmediates(MachineInstr &MI, bool TryToCommute = true) const;
+  bool isKImmOperand(const MachineOperand &Src) const;
+  bool isKUImmOperand(const MachineOperand &Src) const;
+  bool isKImmOrKUImmOperand(const MachineOperand &Src, bool &IsUnsigned) const;
+  bool isReverseInlineImm(const MachineOperand &Src, int32_t &ReverseImm) const;
+  void copyExtraImplicitOps(MachineInstr &NewMI, MachineInstr &MI) const;
+  void shrinkScalarCompare(MachineInstr &MI) const;
+  void shrinkMIMG(MachineInstr &MI) const;
+  bool shrinkScalarLogicOp(MachineInstr &MI) const;
+  bool instAccessReg(iterator_range<MachineInstr::const_mop_iterator> &&R,
+                     Register Reg, unsigned SubReg) const;
+  bool instReadsReg(const MachineInstr *MI, unsigned Reg,
+                    unsigned SubReg) const;
+  bool instModifiesReg(const MachineInstr *MI, unsigned Reg,
+                       unsigned SubReg) const;
+  TargetInstrInfo::RegSubRegPair getSubRegForIndex(Register Reg, unsigned Sub,
+                                                   unsigned I) const;
+  void dropInstructionKeepingImpDefs(MachineInstr &MI) const;
+  MachineInstr *matchSwap(MachineInstr &MovT) const;
+
   bool runOnMachineFunction(MachineFunction &MF) override;
 
   StringRef getPassName() const override { return "SI Shrink Instructions"; }
@@ -59,8 +82,8 @@
 /// This function checks \p MI for operands defined by a move immediate
 /// instruction and then folds the literal constant into the instruction if it
 /// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction.
-static bool foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
-                           MachineRegisterInfo &MRI, bool TryToCommute = true) {
+bool SIShrinkInstructions::foldImmediates(MachineInstr &MI,
+                                          bool TryToCommute) const {
   assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI));
 
   int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
@@ -69,8 +92,8 @@
   MachineOperand &Src0 = MI.getOperand(Src0Idx);
   if (Src0.isReg()) {
     Register Reg = Src0.getReg();
-    if (Reg.isVirtual() && MRI.hasOneUse(Reg)) {
-      MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
+    if (Reg.isVirtual() && MRI->hasOneUse(Reg)) {
+      MachineInstr *Def = MRI->getUniqueVRegDef(Reg);
       if (Def && Def->isMoveImmediate()) {
         MachineOperand &MovSrc = Def->getOperand(1);
         bool ConstantFolded = false;
@@ -91,7 +114,7 @@
         }
 
         if (ConstantFolded) {
-          assert(MRI.use_empty(Reg));
+          assert(MRI->use_empty(Reg));
           Def->eraseFromParent();
           ++NumLiteralConstantsFolded;
           return true;
@@ -103,7 +126,7 @@
   // We have failed to fold src0, so commute the instruction and try again.
   if (TryToCommute && MI.isCommutable()) {
     if (TII->commuteInstruction(MI)) {
-      if (foldImmediates(MI, TII, MRI, false))
+      if (foldImmediates(MI, false))
        return true;
 
       // Commute back.
@@ -114,21 +137,20 @@
   return false;
 }
 
-static bool isKImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
+bool SIShrinkInstructions::isKImmOperand(const MachineOperand &Src) const {
   return isInt<16>(Src.getImm()) &&
          !TII->isInlineConstant(*Src.getParent(),
                                 Src.getParent()->getOperandNo(&Src));
 }
 
-static bool isKUImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
+bool SIShrinkInstructions::isKUImmOperand(const MachineOperand &Src) const {
   return isUInt<16>(Src.getImm()) &&
          !TII->isInlineConstant(*Src.getParent(),
                                 Src.getParent()->getOperandNo(&Src));
 }
 
-static bool isKImmOrKUImmOperand(const SIInstrInfo *TII,
-                                 const MachineOperand &Src,
-                                 bool &IsUnsigned) {
+bool SIShrinkInstructions::isKImmOrKUImmOperand(const MachineOperand &Src,
+                                                bool &IsUnsigned) const {
   if (isInt<16>(Src.getImm())) {
     IsUnsigned = false;
     return !TII->isInlineConstant(Src);
@@ -144,9 +166,8 @@
 
 /// \returns true if the constant in \p Src should be replaced with a bitreverse
 /// of an inline immediate.
-static bool isReverseInlineImm(const SIInstrInfo *TII,
-                               const MachineOperand &Src,
-                               int32_t &ReverseImm) {
+bool SIShrinkInstructions::isReverseInlineImm(const MachineOperand &Src,
+                                              int32_t &ReverseImm) const {
   if (!isInt<32>(Src.getImm()) || TII->isInlineConstant(Src))
     return false;
 
@@ -156,8 +177,9 @@
 
 /// Copy implicit register operands from specified instruction to this
 /// instruction that are not part of the instruction definition.
-static void copyExtraImplicitOps(MachineInstr &NewMI, MachineFunction &MF,
-                                 const MachineInstr &MI) {
+void SIShrinkInstructions::copyExtraImplicitOps(MachineInstr &NewMI,
+                                                MachineInstr &MI) const {
+  MachineFunction &MF = *MI.getMF();
   for (unsigned i = MI.getDesc().getNumOperands() +
          MI.getDesc().getNumImplicitUses() +
          MI.getDesc().getNumImplicitDefs(), e = MI.getNumOperands();
@@ -168,7 +190,7 @@
   }
 }
 
-static void shrinkScalarCompare(const SIInstrInfo *TII, MachineInstr &MI) {
+void SIShrinkInstructions::shrinkScalarCompare(MachineInstr &MI) const {
   // cmpk instructions do scc = dst <cc> imm16, so commute the instruction to
   // get constants on the RHS.
   if (!MI.getOperand(0).isReg())
@@ -191,7 +213,7 @@
   // and initially selected to the unsigned versions.
   if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
     bool HasUImm;
-    if (isKImmOrKUImmOperand(TII, Src1, HasUImm)) {
+    if (isKImmOrKUImmOperand(Src1, HasUImm)) {
       if (!HasUImm) {
         SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ?
           AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32;
@@ -205,22 +227,18 @@
 
   const MCInstrDesc &NewDesc = TII->get(SOPKOpc);
 
-  if ((TII->sopkIsZext(SOPKOpc) && isKUImmOperand(TII, Src1)) ||
-      (!TII->sopkIsZext(SOPKOpc) && isKImmOperand(TII, Src1))) {
+  if ((TII->sopkIsZext(SOPKOpc) && isKUImmOperand(Src1)) ||
+      (!TII->sopkIsZext(SOPKOpc) && isKImmOperand(Src1))) {
     MI.setDesc(NewDesc);
   }
 }
 
 // Shrink NSA encoded instructions with contiguous VGPRs to non-NSA encoding.
-void SIShrinkInstructions::shrinkMIMG(MachineInstr &MI) {
+void SIShrinkInstructions::shrinkMIMG(MachineInstr &MI) const {
   const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
   if (!Info || Info->MIMGEncoding != AMDGPU::MIMGEncGfx10NSA)
     return;
 
-  MachineFunction *MF = MI.getParent()->getParent();
-  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
-  const SIInstrInfo *TII = ST.getInstrInfo();
-  const SIRegisterInfo &TRI = TII->getRegisterInfo();
   int VAddr0Idx =
       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
   unsigned NewAddrDwords = Info->VAddrDwords;
@@ -250,7 +268,7 @@
   bool IsKill = NewAddrDwords == Info->VAddrDwords;
   for (unsigned i = 0; i < Info->VAddrDwords; ++i) {
     const MachineOperand &Op = MI.getOperand(VAddr0Idx + i);
-    unsigned Vgpr = TRI.getHWRegIndex(Op.getReg());
+    unsigned Vgpr = TRI->getHWRegIndex(Op.getReg());
 
     if (i == 0) {
       VgprBase = Vgpr;
@@ -311,10 +329,7 @@
 /// If the inverse of the immediate is legal, use ANDN2, ORN2 or
 /// XNOR (as a ^ b == ~(a ^ ~b)).
 /// \returns true if the caller should continue the machine function iterator
-static bool shrinkScalarLogicOp(const GCNSubtarget &ST,
-                                MachineRegisterInfo &MRI,
-                                const SIInstrInfo *TII,
-                                MachineInstr &MI) {
+bool SIShrinkInstructions::shrinkScalarLogicOp(MachineInstr &MI) const {
   unsigned Opc = MI.getOpcode();
   const MachineOperand *Dest = &MI.getOperand(0);
   MachineOperand *Src0 = &MI.getOperand(1);
@@ -323,7 +338,7 @@
   MachineOperand *SrcImm = Src1;
 
   if (!SrcImm->isImm() ||
-      AMDGPU::isInlinableLiteral32(SrcImm->getImm(), ST.hasInv2PiInlineImm()))
+      AMDGPU::isInlinableLiteral32(SrcImm->getImm(), ST->hasInv2PiInlineImm()))
     return false;
 
   uint32_t Imm = static_cast<uint32_t>(SrcImm->getImm());
@@ -333,7 +348,7 @@
     if (isPowerOf2_32(~Imm)) {
       NewImm = countTrailingOnes(Imm);
       Opc = AMDGPU::S_BITSET0_B32;
-    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
+    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
       NewImm = ~Imm;
       Opc = AMDGPU::S_ANDN2_B32;
     }
@@ -341,12 +356,12 @@
     if (isPowerOf2_32(Imm)) {
       NewImm = countTrailingZeros(Imm);
       Opc = AMDGPU::S_BITSET1_B32;
-    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
+    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
       NewImm = ~Imm;
       Opc = AMDGPU::S_ORN2_B32;
     }
   } else if (Opc == AMDGPU::S_XOR_B32) {
-    if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
+    if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
       NewImm = ~Imm;
       Opc = AMDGPU::S_XNOR_B32;
     }
@@ -356,8 +371,8 @@
 
   if (NewImm != 0) {
     if (Dest->getReg().isVirtual() && SrcReg->isReg()) {
-      MRI.setRegAllocationHint(Dest->getReg(), 0, SrcReg->getReg());
-      MRI.setRegAllocationHint(SrcReg->getReg(), 0, Dest->getReg());
+      MRI->setRegAllocationHint(Dest->getReg(), 0, SrcReg->getReg());
+      MRI->setRegAllocationHint(SrcReg->getReg(), 0, Dest->getReg());
       return true;
     }
 
@@ -384,19 +399,19 @@
 
 // This is the same as MachineInstr::readsRegister/modifiesRegister except
 // it takes subregs into account.
-static bool instAccessReg(iterator_range<MachineInstr::const_mop_iterator> &&R,
-                          Register Reg, unsigned SubReg,
-                          const SIRegisterInfo &TRI) {
+bool SIShrinkInstructions::instAccessReg(
+    iterator_range<MachineInstr::const_mop_iterator> &&R, Register Reg,
+    unsigned SubReg) const {
   for (const MachineOperand &MO : R) {
     if (!MO.isReg())
       continue;
 
     if (Reg.isPhysical() && MO.getReg().isPhysical()) {
-      if (TRI.regsOverlap(Reg, MO.getReg()))
+      if (TRI->regsOverlap(Reg, MO.getReg()))
         return true;
     } else if (MO.getReg() == Reg && Reg.isVirtual()) {
-      LaneBitmask Overlap = TRI.getSubRegIndexLaneMask(SubReg) &
-                            TRI.getSubRegIndexLaneMask(MO.getSubReg());
+      LaneBitmask Overlap = TRI->getSubRegIndexLaneMask(SubReg) &
+                            TRI->getSubRegIndexLaneMask(MO.getSubReg());
       if (Overlap.any())
         return true;
     }
@@ -404,33 +419,31 @@
   return false;
 }
 
-static bool instReadsReg(const MachineInstr *MI,
-                         unsigned Reg, unsigned SubReg,
-                         const SIRegisterInfo &TRI) {
-  return instAccessReg(MI->uses(), Reg, SubReg, TRI);
+bool SIShrinkInstructions::instReadsReg(const MachineInstr *MI, unsigned Reg,
+                                        unsigned SubReg) const {
+  return instAccessReg(MI->uses(), Reg, SubReg);
 }
 
-static bool instModifiesReg(const MachineInstr *MI,
-                            unsigned Reg, unsigned SubReg,
-                            const SIRegisterInfo &TRI) {
-  return instAccessReg(MI->defs(), Reg, SubReg, TRI);
+bool SIShrinkInstructions::instModifiesReg(const MachineInstr *MI, unsigned Reg,
+                                           unsigned SubReg) const {
+  return instAccessReg(MI->defs(), Reg, SubReg);
 }
 
-static TargetInstrInfo::RegSubRegPair
-getSubRegForIndex(Register Reg, unsigned Sub, unsigned I,
-                  const SIRegisterInfo &TRI, const MachineRegisterInfo &MRI) {
-  if (TRI.getRegSizeInBits(Reg, MRI) != 32) {
+TargetInstrInfo::RegSubRegPair
+SIShrinkInstructions::getSubRegForIndex(Register Reg, unsigned Sub,
+                                        unsigned I) const {
+  if (TRI->getRegSizeInBits(Reg, *MRI) != 32) {
     if (Reg.isPhysical()) {
-      Reg = TRI.getSubReg(Reg, TRI.getSubRegFromChannel(I));
+      Reg = TRI->getSubReg(Reg, TRI->getSubRegFromChannel(I));
     } else {
-      Sub = TRI.getSubRegFromChannel(I + TRI.getChannelFromSubReg(Sub));
+      Sub = TRI->getSubRegFromChannel(I + TRI->getChannelFromSubReg(Sub));
     }
   }
   return TargetInstrInfo::RegSubRegPair(Reg, Sub);
 }
 
-static void dropInstructionKeepingImpDefs(MachineInstr &MI,
-                                          const SIInstrInfo *TII) {
+void SIShrinkInstructions::dropInstructionKeepingImpDefs(
+    MachineInstr &MI) const {
   for (unsigned i = MI.getDesc().getNumOperands() +
          MI.getDesc().getNumImplicitUses() +
          MI.getDesc().getNumImplicitDefs(), e = MI.getNumOperands();
@@ -464,8 +477,7 @@
 //
 // This is really just a generic peephole that is not a canonical shrinking,
 // although requirements match the pass placement and it reduces code size too.
-static MachineInstr* matchSwap(MachineInstr &MovT, MachineRegisterInfo &MRI,
-                               const SIInstrInfo *TII) {
+MachineInstr *SIShrinkInstructions::matchSwap(MachineInstr &MovT) const {
   assert(MovT.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
          MovT.getOpcode() == AMDGPU::COPY);
 
@@ -480,8 +492,7 @@
 
   unsigned Size = TII->getOpSize(MovT, 0) / 4;
 
-  const SIRegisterInfo &TRI = TII->getRegisterInfo();
-  if (!TRI.isVGPR(MRI, X))
+  if (!TRI->isVGPR(*MRI, X))
     return nullptr;
 
   if (MovT.hasRegisterImplicitUseOperand(AMDGPU::M0))
@@ -495,7 +506,7 @@
        Iter != E && Count < SearchLimit && !KilledT; ++Iter, ++Count) {
 
     MachineInstr *MovY = &*Iter;
-    KilledT = MovY->killsRegister(T, &TRI);
+    KilledT = MovY->killsRegister(T, TRI);
 
     if ((MovY->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
          MovY->getOpcode() != AMDGPU::COPY) ||
@@ -508,21 +519,20 @@
     Register Y = MovY->getOperand(0).getReg();
     unsigned Ysub = MovY->getOperand(0).getSubReg();
 
-    if (!TRI.isVGPR(MRI, Y))
+    if (!TRI->isVGPR(*MRI, Y))
       continue;
 
     MachineInstr *MovX = nullptr;
     for (auto IY = MovY->getIterator(), I = std::next(MovT.getIterator());
          I != IY; ++I) {
-      if (instReadsReg(&*I, X, Xsub, TRI) ||
-          instModifiesReg(&*I, Y, Ysub, TRI) ||
-          instModifiesReg(&*I, T, Tsub, TRI) ||
-          (MovX && instModifiesReg(&*I, X, Xsub, TRI))) {
+      if (instReadsReg(&*I, X, Xsub) || instModifiesReg(&*I, Y, Ysub) ||
+          instModifiesReg(&*I, T, Tsub) ||
+          (MovX && instModifiesReg(&*I, X, Xsub))) {
         MovX = nullptr;
         break;
       }
-      if (!instReadsReg(&*I, Y, Ysub, TRI)) {
-        if (!MovX && instModifiesReg(&*I, X, Xsub, TRI)) {
+      if (!instReadsReg(&*I, Y, Ysub)) {
+        if (!MovX && instModifiesReg(&*I, X, Xsub)) {
          MovX = nullptr;
          break;
        }
@@ -553,8 +563,8 @@
 
     for (unsigned I = 0; I < Size; ++I) {
       TargetInstrInfo::RegSubRegPair X1, Y1;
-      X1 = getSubRegForIndex(X, Xsub, I, TRI, MRI);
-      Y1 = getSubRegForIndex(Y, Ysub, I, TRI, MRI);
+      X1 = getSubRegForIndex(X, Xsub, I);
+      Y1 = getSubRegForIndex(Y, Ysub, I);
       MachineBasicBlock &MBB = *MovT.getParent();
       auto MIB = BuildMI(MBB, MovX->getIterator(), MovT.getDebugLoc(),
                          TII->get(AMDGPU::V_SWAP_B32))
@@ -569,17 +579,17 @@
       }
     }
     MovX->eraseFromParent();
-    dropInstructionKeepingImpDefs(*MovY, TII);
+    dropInstructionKeepingImpDefs(*MovY);
     MachineInstr *Next = &*std::next(MovT.getIterator());
 
-    if (T.isVirtual() && MRI.use_nodbg_empty(T)) {
-      dropInstructionKeepingImpDefs(MovT, TII);
+    if (T.isVirtual() && MRI->use_nodbg_empty(T)) {
+      dropInstructionKeepingImpDefs(MovT);
     } else {
       Xop.setIsKill(false);
       for (int I = MovT.getNumImplicitOperands() - 1; I >= 0; --I ) {
         unsigned OpNo = MovT.getNumExplicitOperands() + I;
         const MachineOperand &Op = MovT.getOperand(OpNo);
-        if (Op.isKill() && TRI.regsOverlap(X, Op.getReg()))
+        if (Op.isKill() && TRI->regsOverlap(X, Op.getReg()))
           MovT.removeOperand(OpNo);
       }
     }
@@ -594,10 +604,12 @@
   if (skipFunction(MF.getFunction()))
     return false;
 
-  MachineRegisterInfo &MRI = MF.getRegInfo();
-  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
-  const SIInstrInfo *TII = ST.getInstrInfo();
-  unsigned VCCReg = ST.isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;
+  MRI = &MF.getRegInfo();
+  ST = &MF.getSubtarget<GCNSubtarget>();
+  TII = ST->getInstrInfo();
+  TRI = &TII->getRegisterInfo();
+
+  unsigned VCCReg = ST->isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;
 
   std::vector<unsigned> I1Defs;
 
@@ -622,7 +634,7 @@
         MachineOperand &Src = MI.getOperand(1);
         if (Src.isImm() && MI.getOperand(0).getReg().isPhysical()) {
           int32_t ReverseImm;
-          if (isReverseInlineImm(TII, Src, ReverseImm)) {
+          if (isReverseInlineImm(Src, ReverseImm)) {
             MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
             Src.setImm(ReverseImm);
             continue;
@@ -630,9 +642,9 @@
         }
       }
 
-      if (ST.hasSwap() && (MI.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
-                           MI.getOpcode() == AMDGPU::COPY)) {
-        if (auto *NextMI = matchSwap(MI, MRI, TII)) {
+      if (ST->hasSwap() && (MI.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
+                            MI.getOpcode() == AMDGPU::COPY)) {
+        if (auto *NextMI = matchSwap(MI)) {
           Next = NextMI->getIterator();
           continue;
         }
@@ -658,13 +670,13 @@
         // we have a vector add of a constant, we usually don't get the correct
         // allocation due to the subregister usage.
         if (Dest->getReg().isVirtual() && Src0->isReg()) {
-          MRI.setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
-          MRI.setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
+          MRI->setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
+          MRI->setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
           continue;
         }
 
        if (Src0->isReg() && Src0->getReg() == Dest->getReg()) {
-          if (Src1->isImm() && isKImmOperand(TII, *Src1)) {
+          if (Src1->isImm() && isKImmOperand(*Src1)) {
             unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
               AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;
@@ -676,7 +688,7 @@
 
       // Try to use s_cmpk_*
       if (MI.isCompare() && TII->isSOPC(MI)) {
-        shrinkScalarCompare(TII, MI);
+        shrinkScalarCompare(MI);
         continue;
       }
 
@@ -687,9 +699,9 @@
 
         if (Src.isImm() && Dst.getReg().isPhysical()) {
           int32_t ReverseImm;
-          if (isKImmOperand(TII, Src))
+          if (isKImmOperand(Src))
             MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
-          else if (isReverseInlineImm(TII, Src, ReverseImm)) {
+          else if (isReverseInlineImm(Src, ReverseImm)) {
             MI.setDesc(TII->get(AMDGPU::S_BREV_B32));
             Src.setImm(ReverseImm);
           }
@@ -702,12 +714,12 @@
       if (MI.getOpcode() == AMDGPU::S_AND_B32 ||
          MI.getOpcode() == AMDGPU::S_OR_B32 ||
          MI.getOpcode() == AMDGPU::S_XOR_B32) {
-        if (shrinkScalarLogicOp(ST, MRI, TII, MI))
+        if (shrinkScalarLogicOp(MI))
           continue;
       }
 
       if (TII->isMIMG(MI.getOpcode()) &&
-          ST.getGeneration() >= AMDGPUSubtarget::GFX10 &&
+          ST->getGeneration() >= AMDGPUSubtarget::GFX10 &&
          MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::NoVRegs)) {
        shrinkMIMG(MI);
@@ -717,11 +729,11 @@
       if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
         continue;
 
-      if (!TII->canShrink(MI, MRI)) {
+      if (!TII->canShrink(MI, *MRI)) {
         // Try commuting the instruction and see if that enables us to shrink
         // it.
         if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
-            !TII->canShrink(MI, MRI))
+            !TII->canShrink(MI, *MRI))
           continue;
       }
 
@@ -743,7 +755,7 @@
           // provide a hint to the register allocator to use VCC and then we
          // will run this pass again after RA and shrink it if it outputs to
          // VCC.
-          MRI.setRegAllocationHint(DstReg, 0, VCCReg);
+          MRI->setRegAllocationHint(DstReg, 0, VCCReg);
           continue;
         }
         if (DstReg != VCCReg)
@@ -760,7 +772,7 @@
             continue;
           Register SReg = Src2->getReg();
           if (SReg.isVirtual()) {
-            MRI.setRegAllocationHint(SReg, 0, VCCReg);
+            MRI->setRegAllocationHint(SReg, 0, VCCReg);
             continue;
           }
           if (SReg != VCCReg)
@@ -776,7 +788,7 @@
 
         if (SDst->getReg() != VCCReg) {
           if (SDst->getReg().isVirtual())
-            MRI.setRegAllocationHint(SDst->getReg(), 0, VCCReg);
+            MRI->setRegAllocationHint(SDst->getReg(), 0, VCCReg);
           Next = true;
         }
 
@@ -786,7 +798,7 @@
                                                  AMDGPU::OpName::src2);
         if (Src2 && Src2->getReg() != VCCReg) {
           if (Src2->getReg().isVirtual())
-            MRI.setRegAllocationHint(Src2->getReg(), 0, VCCReg);
+            MRI->setRegAllocationHint(Src2->getReg(), 0, VCCReg);
           Next = true;
         }
 
@@ -801,14 +813,14 @@
       ++NumInstructionsShrunk;
 
       // Copy extra operands not present in the instruction definition.
-      copyExtraImplicitOps(*Inst32, MF, MI);
+      copyExtraImplicitOps(*Inst32, MI);
 
       // Copy deadness from the old explicit vcc def to the new implicit def.
       if (SDst && SDst->isDead())
         Inst32->findRegisterDefOperand(VCCReg)->setIsDead();
 
       MI.eraseFromParent();
-      foldImmediates(*Inst32, TII, MRI);
+      foldImmediates(*Inst32);
 
       LLVM_DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
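
Note for reviewers: the change is essentially mechanical. Every static helper
that used to thread MachineRegisterInfo, GCNSubtarget, SIInstrInfo, and
SIRegisterInfo through its parameter list becomes a const member of the pass,
and runOnMachineFunction() caches those pointers once per function. The sketch
below shows the same before/after pattern in a self-contained program; the
names (Ctx, ShrinkPass, canShrink) are invented for illustration and are not
LLVM APIs.

#include <iostream>

struct Ctx { // stand-in for the cached MRI/ST/TII/TRI objects
  bool isSmallImm(int V) const { return V >= -16 && V <= 64; }
};

// Before: a free function; every caller must pass the context explicitly.
static bool canShrinkBefore(const Ctx &C, int V) { return C.isSmallImm(V); }

// After: the pass caches a context pointer once per run(), and the helper
// becomes a const member with a shorter signature at every call site.
class ShrinkPass {
  const Ctx *C = nullptr;

public:
  bool canShrink(int V) const { return C->isSmallImm(V); }

  void run(const Ctx &Context) {
    C = &Context; // initialized once, like MRI/ST/TII/TRI in the patch
    for (int V : {3, 1000})
      std::cout << V << " shrinkable: " << canShrink(V) << '\n';
  }
};

int main() {
  Ctx C;
  std::cout << "before-style: " << canShrinkBefore(C, 3) << '\n';
  ShrinkPass P;
  P.run(C);
}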