Index: lib/Target/AMDGPU/SIFoldOperands.cpp
===================================================================
--- lib/Target/AMDGPU/SIFoldOperands.cpp
+++ lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -271,17 +271,9 @@
   }
 
   MachineOperand *New = Fold.OpToFold;
-  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
-      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
-    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
-
-    Old.setIsUndef(New->isUndef());
-    return true;
-  }
-
-  // FIXME: Handle physical registers.
-
-  return false;
+  Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
+  Old.setIsUndef(New->isUndef());
+  return true;
 }
 
 static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
@@ -503,7 +495,6 @@
   } else {
     if (UseMI->isCopy() && OpToFold.isReg() &&
         TargetRegisterInfo::isVirtualRegister(UseMI->getOperand(0).getReg()) &&
-        TargetRegisterInfo::isVirtualRegister(UseMI->getOperand(1).getReg()) &&
         TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
         TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()) &&
         !UseMI->getOperand(1).getSubReg()) {
@@ -578,14 +569,10 @@
   const TargetRegisterClass *FoldRC =
     TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);
 
-  // Split 64-bit constants into 32-bits for folding.
   if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
     unsigned UseReg = UseOp.getReg();
-    const TargetRegisterClass *UseRC
-      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
-      MRI->getRegClass(UseReg) :
-      TRI->getPhysRegClass(UseReg);
+    const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);
 
     if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
       return;